Spaces: kevinwang676/AI-Agent
kevinwang676 committed · Commit 6a422c8
1 Parent(s): c68a54c

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
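For context, a Space commit like this one is typically produced with `HfApi.upload_folder` from `huggingface_hub`. A minimal sketch (the local folder path is a placeholder; the repo id is the one that appears in the notebook further down):

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes a prior `login()` or an HF token in the environment
api.upload_folder(
    folder_path='path/to/local/folder',  # placeholder: the folder to push
    repo_id='kevinwang676/AI-Agent',
    repo_type='space',
    commit_message='Upload folder using huggingface_hub',
)
```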
- .gitattributes +1 -0
- agentfabric/.ipynb_checkpoints/Untitled-checkpoint.ipynb +6 -0
- agentfabric/README.md +64 -0
- agentfabric/README_CN.md +52 -0
- agentfabric/Untitled.ipynb +172 -0
- agentfabric/__init__.py +0 -0
- agentfabric/app.py +660 -0
- agentfabric/appBot.py +169 -0
- agentfabric/assets/app.css +147 -0
- agentfabric/assets/appBot.css +129 -0
- agentfabric/assets/bot.jpg +0 -0
- agentfabric/assets/user.jpg +0 -0
- agentfabric/builder_core.py +276 -0
- agentfabric/config/builder_config.json +26 -0
- agentfabric/config/builder_config_ci.json +31 -0
- agentfabric/config/builder_config_template.json +26 -0
- agentfabric/config/builder_config_wuxia.json +24 -0
- agentfabric/config/custom_bot_avatar.png +0 -0
- agentfabric/config/model_config.json +77 -0
- agentfabric/config/tool_config.json +35 -0
- agentfabric/config_utils.py +170 -0
- agentfabric/custom_prompt.py +303 -0
- agentfabric/gradio_utils.py +410 -0
- agentfabric/help_tools.py +170 -0
- agentfabric/i18n.py +57 -0
- agentfabric/modelscope_agent/__init__.py +0 -0
- agentfabric/modelscope_agent/agent.py +334 -0
- agentfabric/modelscope_agent/agent_types.py +20 -0
- agentfabric/modelscope_agent/llm/__init__.py +2 -0
- agentfabric/modelscope_agent/llm/base.py +64 -0
- agentfabric/modelscope_agent/llm/custom_llm.py +97 -0
- agentfabric/modelscope_agent/llm/dashscope_llm.py +125 -0
- agentfabric/modelscope_agent/llm/llm_factory.py +28 -0
- agentfabric/modelscope_agent/llm/modelscope_llm.py +132 -0
- agentfabric/modelscope_agent/llm/openai.py +71 -0
- agentfabric/modelscope_agent/llm/utils.py +39 -0
- agentfabric/modelscope_agent/output_parser.py +181 -0
- agentfabric/modelscope_agent/output_wrapper.py +219 -0
- agentfabric/modelscope_agent/prompt/__init__.py +6 -0
- agentfabric/modelscope_agent/prompt/chatglm3_prompt.py +41 -0
- agentfabric/modelscope_agent/prompt/messages_prompt.py +93 -0
- agentfabric/modelscope_agent/prompt/mrkl_prompt.py +118 -0
- agentfabric/modelscope_agent/prompt/ms_prompt.py +34 -0
- agentfabric/modelscope_agent/prompt/prompt.py +232 -0
- agentfabric/modelscope_agent/prompt/prompt_factory.py +16 -0
- agentfabric/modelscope_agent/prompt/raw_prompt_builder.py +34 -0
- agentfabric/modelscope_agent/retrieve.py +115 -0
- agentfabric/modelscope_agent/tools/__init__.py +36 -0
- agentfabric/modelscope_agent/tools/amap_weather.py +64 -0
- agentfabric/modelscope_agent/tools/code_interperter.py +125 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+agentfabric/modelscope_agent/tools/code_interpreter_utils/AlibabaPuHuiTi-3-45-Light.ttf filter=lfs diff=lfs merge=lfs -text
agentfabric/.ipynb_checkpoints/Untitled-checkpoint.ipynb
ADDED
@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
agentfabric/README.md
ADDED
@@ -0,0 +1,64 @@
+---
+# Full documentation: https://modelscope.cn/docs/%E5%88%9B%E7%A9%BA%E9%97%B4%E5%8D%A1%E7%89%87
+domain: # one of cv/nlp/audio/multi-modal/AutoML
+- multi-modal
+tags: # custom tags
+- agent
+- AgentFabric
+
+## Entry file (app.py by default for the Gradio/Streamlit SDKs, index.html for Static HTML)
+deployspec:
+  entry_file: app.py
+
+license: Apache License 2.0
+---
+<h1> Modelscope AgentFabric: Customizable AI-Agents For All</h1>
+
+<p align="center">
+    <br>
+    <img src="https://modelscope.oss-cn-beijing.aliyuncs.com/modelscope.gif" width="400"/>
+    <br>
+<p>
+
+## Introduction
+**ModelScope AgentFabric** is an interactive framework that facilitates the creation of agents tailored to various real-world applications. AgentFabric is built around pluggable and customizable LLMs, and enhances their capabilities for instruction following, extra knowledge retrieval and leveraging external tools. AgentFabric is woven with interfaces including:
+- ⚡ **Agent Builder**: an automatic instructions and tools provider that customizes users' agents through natural conversational interactions.
+- ⚡ **User Agent**: a customized agent for building real-world applications, with instructions, extra knowledge and tools provided by the builder agent and/or user inputs.
+- ⚡ **Configuration Tooling**: the interface for customizing user-agent configurations, with real-time preview of agent behavior as new configurations are applied.
+
+🔗 We currently leverage AgentFabric to build various agents around the [Qwen2.0 LLM API](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) available via DashScope. We are also actively exploring
+other options to incorporate (and compare) more LLMs, via API as well as via native ModelScope models.
+
+
+## Installation
+Simply clone the repo and install the dependencies.
+```bash
+git clone https://github.com/modelscope/modelscope-agent.git
+cd modelscope-agent && pip install -r requirements.txt && pip install -r demo/agentfabric/requirements.txt
+```
+
+## Prerequisites
+
+- Python 3.10
+- Access to an LLM API service such as [DashScope](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key) (free to start).
+
+## Usage
+
+```bash
+export PYTHONPATH=$PYTHONPATH:/path/to/your/modelscope-agent
+export DASHSCOPE_API_KEY=your_api_key
+cd modelscope-agent/demo/agentfabric
+python app.py
+```
+
+## 🚀 Roadmap
+- [x] Allow customizable agent-building via configurations.
+- [x] Agent-building through interactive conversations with LLMs.
+- [x] Support multi-user preview on ModelScope space. [link](https://modelscope.cn/studios/wenmengzhou/AgentFabric/summary) [PR #98](https://github.com/modelscope/modelscope-agent/pull/98)
+- [x] Optimize knowledge retrieval. [PR #105](https://github.com/modelscope/modelscope-agent/pull/105) [PR #107](https://github.com/modelscope/modelscope-agent/pull/107) [PR #109](https://github.com/modelscope/modelscope-agent/pull/109)
+- [x] Allow publication and sharing of agents. [PR #111](https://github.com/modelscope/modelscope-agent/pull/111)
+- [ ] Support more pluggable LLMs via API or ModelScope interface.
+- [ ] Improve long-context handling via memory.
+- [ ] Improve logging and profiling.
+- [ ] Fine-tuning for specific agents.
+- [ ] Evaluation of agents in different scenarios.
agentfabric/README_CN.md
ADDED
@@ -0,0 +1,52 @@
+
+<h1> Modelscope AgentFabric: An Open, Customizable Framework for Building AI Agents</h1>
+
+<p align="center">
+    <br>
+    <img src="https://modelscope.oss-cn-beijing.aliyuncs.com/modelscope.gif" width="400"/>
+    <br>
+<p>
+
+## Introduction
+
+**Modelscope AgentFabric** is an interactive agent framework for conveniently creating agents tailored to a variety of real-world applications. AgentFabric is built around pluggable and customizable LLMs, and strengthens instruction following, extra knowledge retrieval and the use of external tools. The interfaces AgentFabric provides include:
+- **⚡ Agent Builder**: an automatic instruction and tool provider that customizes the user's agent by chatting with the user
+- **⚡ User Agent**: an agent customized for the user's real application, equipped with the instructions, extra knowledge and tools supplied by the builder agent or by user input
+- **⚡ Configuration Tooling**: lets users customize the user agent's configuration and preview its behavior in real time
+
+🔗 We currently build various agent applications on AgentFabric around the [Qwen2.0 LLM API](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) provided by DashScope. We are also actively exploring ways to bring in other LLMs with strong foundational capabilities, via APIs or native ModelScope models, to build a rich variety of agents.
+
+## Installation
+
+Clone the repository and install the dependencies:
+
+```bash
+git clone https://github.com/modelscope/modelscope-agent.git
+cd modelscope-agent && pip install -r requirements.txt && pip install -r demo/agentfabric/requirements.txt
+```
+
+## Prerequisites
+
+- Python 3.10
+- An API key for the Qwen 2.0 model, available for free from [DashScope](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key).
+
+## Usage
+
+```bash
+export PYTHONPATH=$PYTHONPATH:/path/to/your/modelscope-agent
+export DASHSCOPE_API_KEY=your_api_key
+cd modelscope-agent/demo/agentfabric
+python app.py
+```
+
+## 🚀 Roadmap
+- [x] Build agents via manual configuration
+- [x] Build agents through conversations with an LLM
+- [x] Support usage on ModelScope Studio [link](https://modelscope.cn/studios/wenmengzhou/AgentFabric/summary) [PR #98](https://github.com/modelscope/modelscope-agent/pull/98)
+- [x] Optimize knowledge-base retrieval [PR #105](https://github.com/modelscope/modelscope-agent/pull/105) [PR #107](https://github.com/modelscope/modelscope-agent/pull/107) [PR #109](https://github.com/modelscope/modelscope-agent/pull/109)
+- [x] Support publishing and sharing agents
+- [ ] Support additional LLM APIs and ModelScope models
+- [ ] Handle long text input via memory
+- [ ] Production-grade support: logging and profiling
+- [ ] Support agent fine-tuning
+- [ ] Evaluate agents in different scenarios
agentfabric/Untitled.ipynb
ADDED
@@ -0,0 +1,172 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "52e2928d-b216-4d5c-a198-b24b3bc6d5a6",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/workspace/modelscope-agent/apps/agentfabric\n"
+     ]
+    }
+   ],
+   "source": [
+    "cd agentfabric"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "84bdd7ce-176d-464d-b0b3-620585c22541",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Collecting huggingface_hub\n",
+      "  Downloading huggingface_hub-0.19.4-py3-none-any.whl.metadata (14 kB)\n",
+      "Requirement already satisfied: filelock in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (3.9.0)\n",
+      "Requirement already satisfied: fsspec>=2023.5.0 in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (2023.10.0)\n",
+      "Requirement already satisfied: requests in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (2.31.0)\n",
+      "Requirement already satisfied: tqdm>=4.42.1 in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (4.65.0)\n",
+      "Requirement already satisfied: pyyaml>=5.1 in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (6.0.1)\n",
+      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (4.7.1)\n",
+      "Requirement already satisfied: packaging>=20.9 in /opt/conda/lib/python3.10/site-packages (from huggingface_hub) (23.1)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /opt/conda/lib/python3.10/site-packages (from requests->huggingface_hub) (2.0.4)\n",
+      "Requirement already satisfied: idna<4,>=2.5 in /opt/conda/lib/python3.10/site-packages (from requests->huggingface_hub) (3.4)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/conda/lib/python3.10/site-packages (from requests->huggingface_hub) (1.26.18)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.10/site-packages (from requests->huggingface_hub) (2023.7.22)\n",
+      "Downloading huggingface_hub-0.19.4-py3-none-any.whl (311 kB)\n",
+      "   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 311.7/311.7 kB 4.0 MB/s eta 0:00:00\n",
+      "Installing collected packages: huggingface_hub\n",
+      "Successfully installed huggingface_hub-0.19.4\n",
+      "WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\n",
+      "Note: you may need to restart the kernel to use updated packages.\n"
+     ]
+    }
+   ],
+   "source": [
+    "pip install huggingface_hub"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "f7ebcbea-3bb2-468f-b159-ff20693e98c8",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "58fa0d78829446c98f1facc6250b2d5b",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "VBox(children=(HTML(value='<center> <img\\nsrc=https://huggingface.co/front/assets/huggingface_logo-noborder.sv…"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from huggingface_hub import login\n",
+    "login()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "fe90bf8c-0ce1-4989-b855-498f773b9a50",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/workspace/modelscope-agent/apps\n"
+     ]
+    }
+   ],
+   "source": [
+    "cd .."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "76f4884e-81f4-45a0-8180-6ed14fd23e21",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "BadRequestError",
+     "evalue": " (Request ID: Root=1-6575633c-3007e4fc53be037b5ceb5402;4cd41d1e-0094-48a4-bbe9-db4d06cbc023)\n\nBad request for commit endpoint:\n\"license\" must be one of [apache-2.0, mit, openrail, bigscience-openrail-m, creativeml-openrail-m, bigscience-bloom-rail-1.0, bigcode-openrail-m, afl-3.0, artistic-2.0, bsl-1.0, bsd, bsd-2-clause, bsd-3-clause, bsd-3-clause-clear, c-uda, cc, cc0-1.0, cc-by-2.0, cc-by-2.5, cc-by-3.0, cc-by-4.0, cc-by-sa-3.0, cc-by-sa-4.0, cc-by-nc-2.0, cc-by-nc-3.0, cc-by-nc-4.0, cc-by-nd-4.0, cc-by-nc-nd-3.0, cc-by-nc-nd-4.0, cc-by-nc-sa-2.0, cc-by-nc-sa-3.0, cc-by-nc-sa-4.0, cdla-sharing-1.0, cdla-permissive-1.0, cdla-permissive-2.0, wtfpl, ecl-2.0, epl-1.0, epl-2.0, eupl-1.1, agpl-3.0, gfdl, gpl, gpl-2.0, gpl-3.0, lgpl, lgpl-2.1, lgpl-3.0, isc, lppl-1.3c, ms-pl, mpl-2.0, odc-by, odbl, openrail++, osl-3.0, postgresql, ofl-1.1, ncsa, unlicense, zlib, pddl, lgpl-lr, deepfloyd-if-license, llama2, unknown, other, array]",
+     "output_type": "error",
+     "traceback": [
+      "---------------------------------------------------------------------------",
+      "HTTPError                                 Traceback (most recent call last)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py:270, in hf_raise_for_status(response, endpoint_name)",
+      "File /opt/conda/lib/python3.10/site-packages/requests/models.py:1021, in Response.raise_for_status(self)",
+      "HTTPError: 400 Client Error: Bad Request for url: https://huggingface.co/api/spaces/kevinwang676/AI-Agent/commit/main",
+      "\nThe above exception was the direct cause of the following exception:\n",
+      "BadRequestError                           Traceback (most recent call last)",
+      "Cell In[5], line 4",
+      "      1 from huggingface_hub import HfApi",
+      "      2 api = HfApi()",
+      "----> 4 api.upload_folder(",
+      "      5     folder_path=\"\",",
+      "      6     repo_id=\"kevinwang676/AI-Agent\",",
+      "      7     repo_type=\"space\",",
+      "      8 )",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/hf_api.py:1045, in future_compatible.<locals>._inner(self, *args, **kwargs)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/hf_api.py:4138, in HfApi.upload_folder(self, repo_id, folder_path, path_in_repo, commit_message, commit_description, token, repo_type, revision, create_pr, parent_commit, allow_patterns, ignore_patterns, delete_patterns, multi_commits, multi_commits_verbose, run_as_future)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/hf_api.py:1045, in future_compatible.<locals>._inner(self, *args, **kwargs)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/hf_api.py:3237, in HfApi.create_commit(self, repo_id, operations, commit_message, commit_description, token, repo_type, revision, create_pr, num_threads, parent_commit, run_as_future)",
+      "File /opt/conda/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py:326, in hf_raise_for_status(response, endpoint_name)",
+      "BadRequestError:  (Request ID: Root=1-6575633c-3007e4fc53be037b5ceb5402;4cd41d1e-0094-48a4-bbe9-db4d06cbc023)\n\nBad request for commit endpoint:\n\"license\" must be one of [apache-2.0, mit, openrail, bigscience-openrail-m, creativeml-openrail-m, bigscience-bloom-rail-1.0, bigcode-openrail-m, afl-3.0, artistic-2.0, bsl-1.0, bsd, bsd-2-clause, bsd-3-clause, bsd-3-clause-clear, c-uda, cc, cc0-1.0, cc-by-2.0, cc-by-2.5, cc-by-3.0, cc-by-4.0, cc-by-sa-3.0, cc-by-sa-4.0, cc-by-nc-2.0, cc-by-nc-3.0, cc-by-nc-4.0, cc-by-nd-4.0, cc-by-nc-nd-3.0, cc-by-nc-nd-4.0, cc-by-nc-sa-2.0, cc-by-nc-sa-3.0, cc-by-nc-sa-4.0, cdla-sharing-1.0, cdla-permissive-1.0, cdla-permissive-2.0, wtfpl, ecl-2.0, epl-1.0, epl-2.0, eupl-1.1, agpl-3.0, gfdl, gpl, gpl-2.0, gpl-3.0, lgpl, lgpl-2.1, lgpl-3.0, isc, lppl-1.3c, ms-pl, mpl-2.0, odc-by, odbl, openrail++, osl-3.0, postgresql, ofl-1.1, ncsa, unlicense, zlib, pddl, lgpl-lr, deepfloyd-if-license, llama2, unknown, other, array]"
+     ]
+    }
+   ],
+   "source": [
+    "from huggingface_hub import HfApi\n",
+    "api = HfApi()\n",
+    "\n",
+    "api.upload_folder(\n",
+    "    folder_path=\"\",\n",
+    "    repo_id=\"kevinwang676/AI-Agent\",\n",
+    "    repo_type=\"space\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2ee3f7dc-08f8-4675-b48a-a61095caf08b",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
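The `BadRequestError` in the last executed cell is a metadata problem, not an upload problem: the Hub only accepts license identifiers from its fixed list (for example `apache-2.0`), while `agentfabric/README.md` sets `license: Apache License 2.0` in its front matter. A hedged sketch of one way to fix and retry (the folder path is illustrative; the notebook itself passed `folder_path=""`):

```python
from huggingface_hub import HfApi

# Rewrite the card's license field to a valid Hub identifier before retrying.
readme_path = 'agentfabric/README.md'
with open(readme_path) as f:
    card = f.read()
with open(readme_path, 'w') as f:
    f.write(card.replace('license: Apache License 2.0', 'license: apache-2.0'))

HfApi().upload_folder(
    folder_path='agentfabric',  # illustrative; adjust to the folder being uploaded
    repo_id='kevinwang676/AI-Agent',
    repo_type='space',
)
```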
agentfabric/__init__.py
ADDED
File without changes
agentfabric/app.py
ADDED
@@ -0,0 +1,660 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import random
|
3 |
+
import re
|
4 |
+
import shutil
|
5 |
+
import traceback
|
6 |
+
|
7 |
+
import gradio as gr
|
8 |
+
import json
|
9 |
+
import yaml
|
10 |
+
from builder_core import beauty_output, init_builder_chatbot_agent
|
11 |
+
from config_utils import (DEFAULT_AGENT_DIR, Config, get_avatar_image,
|
12 |
+
get_ci_dir, get_user_cfg_file, get_user_dir,
|
13 |
+
is_valid_plugin_configuration, parse_configuration,
|
14 |
+
save_avatar_image, save_builder_configuration,
|
15 |
+
save_plugin_configuration)
|
16 |
+
from gradio_utils import ChatBot, format_cover_html, format_goto_publish_html
|
17 |
+
from i18n import I18n
|
18 |
+
from publish_util import pop_user_info_from_config, prepare_agent_zip
|
19 |
+
from user_core import init_user_chatbot_agent
|
20 |
+
|
21 |
+
|
22 |
+
def init_user(uuid_str, state):
|
23 |
+
try:
|
24 |
+
seed = state.get('session_seed', random.randint(0, 1000000000))
|
25 |
+
user_agent = init_user_chatbot_agent(uuid_str)
|
26 |
+
user_agent.seed = seed
|
27 |
+
state['user_agent'] = user_agent
|
28 |
+
except Exception as e:
|
29 |
+
error = traceback.format_exc()
|
30 |
+
print(f'Error:{e}, with detail: {error}')
|
31 |
+
return state
|
32 |
+
|
33 |
+
|
34 |
+
def init_builder(uuid_str, state):
|
35 |
+
|
36 |
+
try:
|
37 |
+
builder_agent = init_builder_chatbot_agent(uuid_str)
|
38 |
+
state['builder_agent'] = builder_agent
|
39 |
+
except Exception as e:
|
40 |
+
error = traceback.format_exc()
|
41 |
+
print(f'Error:{e}, with detail: {error}')
|
42 |
+
return state
|
43 |
+
|
44 |
+
|
45 |
+
def update_builder(uuid_str, state):
|
46 |
+
builder_agent = state['builder_agent']
|
47 |
+
|
48 |
+
try:
|
49 |
+
builder_cfg_file = get_user_cfg_file(uuid_str=uuid_str)
|
50 |
+
with open(builder_cfg_file, 'r') as f:
|
51 |
+
config = json.load(f)
|
52 |
+
builder_agent.update_config_to_history(config)
|
53 |
+
except Exception as e:
|
54 |
+
error = traceback.format_exc()
|
55 |
+
print(f'Error:{e}, with detail: {error}')
|
56 |
+
return state
|
57 |
+
|
58 |
+
|
59 |
+
def check_uuid(uuid_str):
|
60 |
+
if not uuid_str or uuid_str == '':
|
61 |
+
if os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio':
|
62 |
+
raise gr.Error('请登陆后使用! (Please login first)')
|
63 |
+
else:
|
64 |
+
uuid_str = 'local_user'
|
65 |
+
return uuid_str
|
66 |
+
|
67 |
+
|
68 |
+
def process_configuration(uuid_str, bot_avatar, name, description,
|
69 |
+
instructions, model, suggestions, knowledge_files,
|
70 |
+
capabilities_checkboxes, openapi_schema,
|
71 |
+
openapi_auth, openapi_auth_apikey,
|
72 |
+
openapi_auth_apikey_type, openapi_privacy_policy,
|
73 |
+
state):
|
74 |
+
uuid_str = check_uuid(uuid_str)
|
75 |
+
tool_cfg = state['tool_cfg']
|
76 |
+
capabilities = state['capabilities']
|
77 |
+
bot_avatar, bot_avatar_path = save_avatar_image(bot_avatar, uuid_str)
|
78 |
+
suggestions_filtered = [row for row in suggestions if row[0]]
|
79 |
+
user_dir = get_user_dir(uuid_str)
|
80 |
+
if knowledge_files is not None:
|
81 |
+
new_knowledge_files = [
|
82 |
+
os.path.join(user_dir, os.path.basename((f.name)))
|
83 |
+
for f in knowledge_files
|
84 |
+
]
|
85 |
+
for src_file, dst_file in zip(knowledge_files, new_knowledge_files):
|
86 |
+
if not os.path.exists(dst_file):
|
87 |
+
shutil.copy(src_file.name, dst_file)
|
88 |
+
else:
|
89 |
+
new_knowledge_files = []
|
90 |
+
|
91 |
+
builder_cfg = {
|
92 |
+
'name': name,
|
93 |
+
'avatar': bot_avatar,
|
94 |
+
'description': description,
|
95 |
+
'instruction': instructions,
|
96 |
+
'prompt_recommend': [row[0] for row in suggestions_filtered],
|
97 |
+
'knowledge': new_knowledge_files,
|
98 |
+
'tools': {
|
99 |
+
capability: dict(
|
100 |
+
name=tool_cfg[capability]['name'],
|
101 |
+
is_active=tool_cfg[capability]['is_active'],
|
102 |
+
use=True if capability in capabilities_checkboxes else False)
|
103 |
+
for capability in map(lambda item: item[1], capabilities)
|
104 |
+
},
|
105 |
+
'model': model,
|
106 |
+
}
|
107 |
+
|
108 |
+
try:
|
109 |
+
try:
|
110 |
+
schema_dict = json.loads(openapi_schema)
|
111 |
+
except json.decoder.JSONDecodeError:
|
112 |
+
schema_dict = yaml.safe_load(openapi_schema)
|
113 |
+
except Exception as e:
|
114 |
+
raise gr.Error(
|
115 |
+
f'OpenAPI schema format error, should be one of json and yaml: {e}'
|
116 |
+
)
|
117 |
+
|
118 |
+
openapi_plugin_cfg = {
|
119 |
+
'schema': schema_dict,
|
120 |
+
'auth': {
|
121 |
+
'type': openapi_auth,
|
122 |
+
'apikey': openapi_auth_apikey,
|
123 |
+
'apikey_type': openapi_auth_apikey_type
|
124 |
+
},
|
125 |
+
'privacy_policy': openapi_privacy_policy
|
126 |
+
}
|
127 |
+
if is_valid_plugin_configuration(openapi_plugin_cfg):
|
128 |
+
save_plugin_configuration(openapi_plugin_cfg, uuid_str)
|
129 |
+
except Exception as e:
|
130 |
+
error = traceback.format_exc()
|
131 |
+
print(f'Error:{e}, with detail: {error}')
|
132 |
+
|
133 |
+
save_builder_configuration(builder_cfg, uuid_str)
|
134 |
+
update_builder(uuid_str, state)
|
135 |
+
init_user(uuid_str, state)
|
136 |
+
return [
|
137 |
+
gr.HTML.update(
|
138 |
+
visible=True,
|
139 |
+
value=format_cover_html(builder_cfg, bot_avatar_path)),
|
140 |
+
gr.Chatbot.update(
|
141 |
+
visible=False,
|
142 |
+
avatar_images=get_avatar_image(bot_avatar, uuid_str)),
|
143 |
+
gr.Dataset.update(samples=suggestions_filtered),
|
144 |
+
gr.DataFrame.update(value=suggestions_filtered)
|
145 |
+
]
|
146 |
+
|
147 |
+
|
148 |
+
# 创建 Gradio 界面
|
149 |
+
demo = gr.Blocks(css='assets/app.css')
|
150 |
+
with demo:
|
151 |
+
|
152 |
+
uuid_str = gr.Textbox(label='modelscope_uuid', visible=False)
|
153 |
+
draw_seed = random.randint(0, 1000000000)
|
154 |
+
state = gr.State({'session_seed': draw_seed})
|
155 |
+
i18n = I18n('zh-cn')
|
156 |
+
with gr.Row():
|
157 |
+
with gr.Column(scale=5):
|
158 |
+
header = gr.Markdown(i18n.get('header'))
|
159 |
+
with gr.Column(scale=1):
|
160 |
+
language = gr.Dropdown(
|
161 |
+
choices=[('中文', 'zh-cn'), ('English', 'en')],
|
162 |
+
show_label=False,
|
163 |
+
container=False,
|
164 |
+
value='zh-cn',
|
165 |
+
interactive=True)
|
166 |
+
with gr.Row():
|
167 |
+
with gr.Column():
|
168 |
+
with gr.Tabs() as tabs:
|
169 |
+
with gr.Tab(i18n.get_whole('create'), id=0) as create_tab:
|
170 |
+
with gr.Column():
|
171 |
+
# "Create" 标签页的 Chatbot 组件
|
172 |
+
start_text = '欢迎使用agent创建助手。我可以帮助您创建一个定制agent。'\
|
173 |
+
'您希望您的agent主要用于什么领域或任务?比如,您可以说,我想做一个RPG游戏agent'
|
174 |
+
create_chatbot = gr.Chatbot(
|
175 |
+
show_label=False, value=[[None, start_text]])
|
176 |
+
create_chat_input = gr.Textbox(
|
177 |
+
label=i18n.get('message'),
|
178 |
+
placeholder=i18n.get('message_placeholder'))
|
179 |
+
create_send_button = gr.Button(
|
180 |
+
i18n.get('sendOnLoading'), interactive=False)
|
181 |
+
|
182 |
+
configure_tab = gr.Tab(i18n.get_whole('configure'), id=1)
|
183 |
+
with configure_tab:
|
184 |
+
with gr.Column():
|
185 |
+
# "Configure" 标签页的配置输入字段
|
186 |
+
with gr.Row():
|
187 |
+
bot_avatar_comp = gr.Image(
|
188 |
+
label=i18n.get('form_avatar'),
|
189 |
+
placeholder='Chatbot avatar image',
|
190 |
+
source='upload',
|
191 |
+
interactive=True,
|
192 |
+
type='filepath',
|
193 |
+
scale=1,
|
194 |
+
width=182,
|
195 |
+
height=182,
|
196 |
+
)
|
197 |
+
with gr.Column(scale=4):
|
198 |
+
name_input = gr.Textbox(
|
199 |
+
label=i18n.get('form_name'),
|
200 |
+
placeholder=i18n.get(
|
201 |
+
'form_name_placeholder'))
|
202 |
+
description_input = gr.Textbox(
|
203 |
+
label=i18n.get('form_description'),
|
204 |
+
placeholder=i18n.get(
|
205 |
+
'form_description_placeholder'))
|
206 |
+
|
207 |
+
instructions_input = gr.Textbox(
|
208 |
+
label=i18n.get('form_instructions'),
|
209 |
+
placeholder=i18n.get(
|
210 |
+
'form_instructions_placeholder'),
|
211 |
+
lines=3)
|
212 |
+
model_selector = model_selector = gr.Dropdown(
|
213 |
+
label=i18n.get('form_model'))
|
214 |
+
suggestion_input = gr.Dataframe(
|
215 |
+
show_label=False,
|
216 |
+
value=[['']],
|
217 |
+
datatype=['str'],
|
218 |
+
headers=[i18n.get_whole('form_prompt_suggestion')],
|
219 |
+
type='array',
|
220 |
+
col_count=(1, 'fixed'),
|
221 |
+
interactive=True)
|
222 |
+
knowledge_input = gr.File(
|
223 |
+
label=i18n.get('form_knowledge'),
|
224 |
+
file_count='multiple',
|
225 |
+
file_types=['text', '.json', '.csv', '.pdf'])
|
226 |
+
capabilities_checkboxes = gr.CheckboxGroup(
|
227 |
+
label=i18n.get('form_capabilities'))
|
228 |
+
|
229 |
+
with gr.Accordion(
|
230 |
+
i18n.get('open_api_accordion'),
|
231 |
+
open=False) as open_api_accordion:
|
232 |
+
openapi_schema = gr.Textbox(
|
233 |
+
label='Schema',
|
234 |
+
placeholder=
|
235 |
+
'Enter your OpenAPI schema here, JSON or YAML format only'
|
236 |
+
)
|
237 |
+
|
238 |
+
with gr.Group():
|
239 |
+
openapi_auth_type = gr.Radio(
|
240 |
+
label='Authentication Type',
|
241 |
+
choices=['None', 'API Key'],
|
242 |
+
value='None')
|
243 |
+
openapi_auth_apikey = gr.Textbox(
|
244 |
+
label='API Key',
|
245 |
+
placeholder='Enter your API Key here')
|
246 |
+
openapi_auth_apikey_type = gr.Radio(
|
247 |
+
label='API Key type', choices=['Bearer'])
|
248 |
+
openapi_privacy_policy = gr.Textbox(
|
249 |
+
label='Privacy Policy',
|
250 |
+
placeholder='Enter privacy policy URL')
|
251 |
+
|
252 |
+
configure_button = gr.Button(
|
253 |
+
i18n.get('form_update_button'))
|
254 |
+
|
255 |
+
with gr.Column():
|
256 |
+
# Preview
|
257 |
+
preview_header = gr.HTML(
|
258 |
+
f"""<div class="preview_header">{i18n.get('preview')}<div>""")
|
259 |
+
|
260 |
+
user_chat_bot_cover = gr.HTML(format_cover_html({}, None))
|
261 |
+
user_chatbot = ChatBot(
|
262 |
+
value=[[None, None]],
|
263 |
+
elem_id='user_chatbot',
|
264 |
+
elem_classes=['markdown-body'],
|
265 |
+
avatar_images=get_avatar_image('', uuid_str),
|
266 |
+
height=650,
|
267 |
+
latex_delimiters=[],
|
268 |
+
show_label=False,
|
269 |
+
visible=False)
|
270 |
+
preview_chat_input = gr.Textbox(
|
271 |
+
label=i18n.get('message'),
|
272 |
+
placeholder=i18n.get('message_placeholder'))
|
273 |
+
user_chat_bot_suggest = gr.Dataset(
|
274 |
+
label=i18n.get('prompt_suggestion'),
|
275 |
+
components=[preview_chat_input],
|
276 |
+
samples=[])
|
277 |
+
# preview_send_button = gr.Button('Send')
|
278 |
+
with gr.Row():
|
279 |
+
upload_button = gr.UploadButton(
|
280 |
+
i18n.get('upload_btn'),
|
281 |
+
file_types=[
|
282 |
+
'.csv', '.doc', '.docx', '.xls', '.xlsx', '.txt',
|
283 |
+
'.md', '.pdf', '.jpeg', '.png', '.jpg', '.gif'
|
284 |
+
],
|
285 |
+
file_count='multiple')
|
286 |
+
preview_send_button = gr.Button(
|
287 |
+
i18n.get('sendOnLoading'), interactive=False)
|
288 |
+
user_chat_bot_suggest.select(
|
289 |
+
lambda evt: evt[0],
|
290 |
+
inputs=[user_chat_bot_suggest],
|
291 |
+
outputs=[preview_chat_input])
|
292 |
+
with gr.Accordion(
|
293 |
+
label=i18n.get('publish'),
|
294 |
+
open=False) as publish_accordion:
|
295 |
+
with gr.Row():
|
296 |
+
with gr.Column():
|
297 |
+
publish_button = gr.Button(i18n.get_whole('build'))
|
298 |
+
gr.Markdown(f'#### 1.{i18n.get_whole("build_hint")}')
|
299 |
+
|
300 |
+
with gr.Column():
|
301 |
+
publish_link = gr.HTML(
|
302 |
+
value=format_goto_publish_html(
|
303 |
+
i18n.get_whole('publish'), '', {}, True))
|
304 |
+
gr.Markdown(f'#### 2.{i18n.get_whole("publish_hint")}')
|
305 |
+
|
306 |
+
configure_updated_outputs = [
|
307 |
+
state,
|
308 |
+
# config form
|
309 |
+
bot_avatar_comp,
|
310 |
+
name_input,
|
311 |
+
description_input,
|
312 |
+
instructions_input,
|
313 |
+
model_selector,
|
314 |
+
suggestion_input,
|
315 |
+
knowledge_input,
|
316 |
+
capabilities_checkboxes,
|
317 |
+
# bot
|
318 |
+
user_chat_bot_cover,
|
319 |
+
user_chat_bot_suggest,
|
320 |
+
preview_send_button,
|
321 |
+
create_send_button,
|
322 |
+
]
|
323 |
+
|
324 |
+
# 初始化表单
|
325 |
+
def init_ui_config(uuid_str, _state, builder_cfg, model_cfg, tool_cfg):
|
326 |
+
print('builder_cfg:', builder_cfg)
|
327 |
+
# available models
|
328 |
+
models = list(model_cfg.keys())
|
329 |
+
capabilities = [(tool_cfg[tool_key]['name'], tool_key)
|
330 |
+
for tool_key in tool_cfg.keys()
|
331 |
+
if tool_cfg[tool_key].get('is_active', False)]
|
332 |
+
_state['model_cfg'] = model_cfg
|
333 |
+
_state['tool_cfg'] = tool_cfg
|
334 |
+
_state['capabilities'] = capabilities
|
335 |
+
bot_avatar = get_avatar_image(builder_cfg.get('avatar', ''),
|
336 |
+
uuid_str)[1]
|
337 |
+
suggests = builder_cfg.get('prompt_recommend', [])
|
338 |
+
return {
|
339 |
+
state:
|
340 |
+
_state,
|
341 |
+
bot_avatar_comp:
|
342 |
+
gr.Image.update(value=bot_avatar),
|
343 |
+
name_input:
|
344 |
+
builder_cfg.get('name', ''),
|
345 |
+
description_input:
|
346 |
+
builder_cfg.get('description'),
|
347 |
+
instructions_input:
|
348 |
+
builder_cfg.get('instruction'),
|
349 |
+
model_selector:
|
350 |
+
gr.Dropdown.update(
|
351 |
+
value=builder_cfg.get('model', models[0]), choices=models),
|
352 |
+
suggestion_input: [[str] for str in suggests],
|
353 |
+
knowledge_input:
|
354 |
+
builder_cfg.get('knowledge', [])
|
355 |
+
if len(builder_cfg['knowledge']) > 0 else None,
|
356 |
+
capabilities_checkboxes:
|
357 |
+
gr.CheckboxGroup.update(
|
358 |
+
value=[
|
359 |
+
tool for tool in builder_cfg.get('tools', {}).keys()
|
360 |
+
if builder_cfg.get('tools').get(tool).get('use', False)
|
361 |
+
],
|
362 |
+
choices=capabilities),
|
363 |
+
# bot
|
364 |
+
user_chat_bot_cover:
|
365 |
+
format_cover_html(builder_cfg, bot_avatar),
|
366 |
+
user_chat_bot_suggest:
|
367 |
+
gr.Dataset.update(samples=[[item] for item in suggests]),
|
368 |
+
}
|
369 |
+
|
370 |
+
# tab 切换的事件处理
|
371 |
+
def on_congifure_tab_select(_state, uuid_str):
|
372 |
+
uuid_str = check_uuid(uuid_str)
|
373 |
+
configure_updated = _state.get('configure_updated', False)
|
374 |
+
if configure_updated:
|
375 |
+
builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration(
|
376 |
+
uuid_str)
|
377 |
+
_state['configure_updated'] = False
|
378 |
+
return init_ui_config(uuid_str, _state, builder_cfg, model_cfg,
|
379 |
+
tool_cfg)
|
380 |
+
else:
|
381 |
+
return {state: _state}
|
382 |
+
|
383 |
+
configure_tab.select(
|
384 |
+
on_congifure_tab_select,
|
385 |
+
inputs=[state, uuid_str],
|
386 |
+
outputs=configure_updated_outputs)
|
387 |
+
|
388 |
+
# 配置 "Create" 标签页的消息发送功能
|
389 |
+
def format_message_with_builder_cfg(_state, chatbot, builder_cfg,
|
390 |
+
uuid_str):
|
391 |
+
uuid_str = check_uuid(uuid_str)
|
392 |
+
bot_avatar = builder_cfg.get('avatar', '')
|
393 |
+
prompt_recommend = builder_cfg.get('prompt_recommend', [])
|
394 |
+
suggestion = [[row] for row in prompt_recommend]
|
395 |
+
bot_avatar_path = get_avatar_image(bot_avatar, uuid_str)[1]
|
396 |
+
save_builder_configuration(builder_cfg, uuid_str)
|
397 |
+
_state['configure_updated'] = True
|
398 |
+
return {
|
399 |
+
create_chatbot:
|
400 |
+
chatbot,
|
401 |
+
user_chat_bot_cover:
|
402 |
+
gr.HTML.update(
|
403 |
+
visible=True,
|
404 |
+
value=format_cover_html(builder_cfg, bot_avatar_path)),
|
405 |
+
user_chatbot:
|
406 |
+
gr.Chatbot.update(
|
407 |
+
visible=False,
|
408 |
+
avatar_images=get_avatar_image(bot_avatar, uuid_str)),
|
409 |
+
user_chat_bot_suggest:
|
410 |
+
gr.Dataset.update(samples=suggestion)
|
411 |
+
}
|
412 |
+
|
413 |
+
def create_send_message(chatbot, input, _state, uuid_str):
|
414 |
+
uuid_str = check_uuid(uuid_str)
|
415 |
+
# 将发送的消息添加到聊天历史
|
416 |
+
builder_agent = _state['builder_agent']
|
417 |
+
chatbot.append((input, ''))
|
418 |
+
yield {
|
419 |
+
create_chatbot: chatbot,
|
420 |
+
create_chat_input: gr.Textbox.update(value=''),
|
421 |
+
}
|
422 |
+
response = ''
|
423 |
+
for frame in builder_agent.stream_run(
|
424 |
+
input, print_info=True, uuid_str=uuid_str):
|
425 |
+
llm_result = frame.get('llm_text', '')
|
426 |
+
exec_result = frame.get('exec_result', '')
|
427 |
+
step_result = frame.get('step', '')
|
428 |
+
print(frame)
|
429 |
+
if len(exec_result) != 0:
|
430 |
+
if isinstance(exec_result, dict):
|
431 |
+
exec_result = exec_result['result']
|
432 |
+
assert isinstance(exec_result, Config)
|
433 |
+
yield format_message_with_builder_cfg(
|
434 |
+
_state,
|
435 |
+
chatbot,
|
436 |
+
exec_result.to_dict(),
|
437 |
+
uuid_str=uuid_str)
|
438 |
+
else:
|
439 |
+
# llm result
|
440 |
+
if isinstance(llm_result, dict):
|
441 |
+
content = llm_result['content']
|
442 |
+
else:
|
443 |
+
content = llm_result
|
444 |
+
frame_text = content
|
445 |
+
response = beauty_output(f'{response}{frame_text}',
|
446 |
+
step_result)
|
447 |
+
chatbot[-1] = (input, response)
|
448 |
+
yield {
|
449 |
+
create_chatbot: chatbot,
|
450 |
+
}
|
451 |
+
|
452 |
+
create_send_button.click(
|
453 |
+
create_send_message,
|
454 |
+
inputs=[create_chatbot, create_chat_input, state, uuid_str],
|
455 |
+
outputs=[
|
456 |
+
create_chatbot, user_chat_bot_cover, user_chatbot,
|
457 |
+
user_chat_bot_suggest, create_chat_input
|
458 |
+
])
|
459 |
+
|
460 |
+
# 配置 "Configure" 标签页的提交按钮功能
|
461 |
+
configure_button.click(
|
462 |
+
process_configuration,
|
463 |
+
inputs=[
|
464 |
+
uuid_str, bot_avatar_comp, name_input, description_input,
|
465 |
+
instructions_input, model_selector, suggestion_input,
|
466 |
+
knowledge_input, capabilities_checkboxes, openapi_schema,
|
467 |
+
openapi_auth_type, openapi_auth_apikey, openapi_auth_apikey_type,
|
468 |
+
openapi_privacy_policy, state
|
469 |
+
],
|
470 |
+
outputs=[
|
471 |
+
user_chat_bot_cover, user_chatbot, user_chat_bot_suggest,
|
472 |
+
suggestion_input
|
473 |
+
])
|
474 |
+
|
475 |
+
# 配置 "Preview" 的消息发送功能
|
476 |
+
def preview_send_message(chatbot, input, _state):
|
477 |
+
# 将发送的消息添加到聊天历史
|
478 |
+
user_agent = _state['user_agent']
|
479 |
+
if 'new_file_paths' in _state:
|
480 |
+
new_file_paths = _state['new_file_paths']
|
481 |
+
else:
|
482 |
+
new_file_paths = []
|
483 |
+
_state['new_file_paths'] = []
|
484 |
+
|
485 |
+
chatbot.append((input, ''))
|
486 |
+
yield {
|
487 |
+
user_chatbot: gr.Chatbot.update(visible=True, value=chatbot),
|
488 |
+
user_chat_bot_cover: gr.HTML.update(visible=False),
|
489 |
+
preview_chat_input: gr.Textbox.update(value='')
|
490 |
+
}
|
491 |
+
|
492 |
+
response = ''
|
493 |
+
try:
|
494 |
+
for frame in user_agent.stream_run(
|
495 |
+
input,
|
496 |
+
print_info=True,
|
497 |
+
remote=False,
|
498 |
+
append_files=new_file_paths):
|
499 |
+
llm_result = frame.get('llm_text', '')
|
500 |
+
exec_result = frame.get('exec_result', '')
|
501 |
+
if len(exec_result) != 0:
|
502 |
+
# action_exec_result
|
503 |
+
if isinstance(exec_result, dict):
|
504 |
+
exec_result = str(exec_result['result'])
|
505 |
+
frame_text = f'<result>{exec_result}</result>'
|
506 |
+
else:
|
507 |
+
# llm result
|
508 |
+
frame_text = llm_result
|
509 |
+
|
510 |
+
# important! do not change this
|
511 |
+
response += frame_text
|
512 |
+
chatbot[-1] = (input, response)
|
513 |
+
yield {user_chatbot: chatbot}
|
514 |
+
except Exception as e:
|
515 |
+
if 'dashscope.common.error.AuthenticationError' in str(e):
|
516 |
+
msg = 'DASHSCOPE_API_KEY should be set via environment variable. You can acquire this in ' \
|
517 |
+
'https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key'
|
518 |
+
else:
|
519 |
+
msg = str(e)
|
520 |
+
chatbot[-1] = (input, msg)
|
521 |
+
yield {user_chatbot: chatbot}
|
522 |
+
|
523 |
+
preview_send_button.click(
|
524 |
+
preview_send_message,
|
525 |
+
inputs=[user_chatbot, preview_chat_input, state],
|
526 |
+
outputs=[user_chatbot, user_chat_bot_cover, preview_chat_input])
|
527 |
+
|
528 |
+
def upload_file(chatbot, upload_button, _state, uuid_str):
|
529 |
+
uuid_str = check_uuid(uuid_str)
|
530 |
+
new_file_paths = []
|
            if 'file_paths' in _state:
                file_paths = _state['file_paths']
            else:
                file_paths = []
            for file in upload_button:
                file_name = os.path.basename(file.name)
                # convert xxx.json to xxx_uuid_str.json
                file_name = file_name.replace('.', f'_{uuid_str}.')
                file_path = os.path.join(get_ci_dir(), file_name)
                if not os.path.exists(file_path):
                    # make sure the file path's directory exists
                    os.makedirs(os.path.dirname(file_path), exist_ok=True)
                    shutil.copy(file.name, file_path)
                file_paths.append(file_path)
                new_file_paths.append(file_path)
                chatbot.append((None, f'上传文件{file_name},成功'))
            yield {
                user_chatbot: gr.Chatbot.update(visible=True, value=chatbot),
                user_chat_bot_cover: gr.HTML.update(visible=False),
                preview_chat_input: gr.Textbox.update(value='')
            }

            _state['file_paths'] = file_paths
            _state['new_file_paths'] = new_file_paths

        upload_button.upload(
            upload_file,
            inputs=[user_chatbot, upload_button, state, uuid_str],
            outputs=[user_chatbot, user_chat_bot_cover, preview_chat_input])

        # configuration for publish
        def publish_agent(name, uuid_str, state):
            uuid_str = check_uuid(uuid_str)
            user_info = pop_user_info_from_config(DEFAULT_AGENT_DIR, uuid_str)
            output_url = prepare_agent_zip(name, DEFAULT_AGENT_DIR, uuid_str,
                                           state)
            # output_url = "https://test.url"
            return format_goto_publish_html(
                i18n.get_whole('publish'), output_url, user_info)

        publish_button.click(
            publish_agent,
            inputs=[name_input, uuid_str, state],
            outputs=[publish_link],
        )

        def change_lang(language):
            i18n = I18n(language)
            return {
                bot_avatar_comp:
                gr.Image(label=i18n.get('form_avatar')),
                name_input:
                gr.Textbox(
                    label=i18n.get('form_name'),
                    placeholder=i18n.get('form_name_placeholder')),
                description_input:
                gr.Textbox(
                    label=i18n.get('form_description'),
                    placeholder=i18n.get('form_description_placeholder')),
                instructions_input:
                gr.Textbox(
                    label=i18n.get('form_instructions'),
                    placeholder=i18n.get('form_instructions_placeholder')),
                model_selector:
                gr.Dropdown(label=i18n.get('form_model')),
                knowledge_input:
                gr.File(label=i18n.get('form_knowledge')),
                capabilities_checkboxes:
                gr.CheckboxGroup(label=i18n.get('form_capabilities')),
                open_api_accordion:
                gr.Accordion(label=i18n.get('open_api_accordion')),
                configure_button:
                gr.Button(i18n.get('form_update_button')),
                preview_header:
                gr.HTML(
                    f"""<div class="preview_header">{i18n.get('preview')}</div>"""),
                preview_send_button:
                gr.Button.update(value=i18n.get('send')),
                create_chat_input:
                gr.Textbox(
                    label=i18n.get('message'),
                    placeholder=i18n.get('message_placeholder')),
                create_send_button:
                gr.Button.update(value=i18n.get('send')),
                user_chat_bot_suggest:
                gr.Dataset(label=i18n.get('prompt_suggestion')),
                preview_chat_input:
                gr.Textbox(
                    label=i18n.get('message'),
                    placeholder=i18n.get('message_placeholder')),
                publish_accordion:
                gr.Accordion(label=i18n.get('publish')),
                upload_button:
                gr.UploadButton(i18n.get('upload_btn')),
                header:
                gr.Markdown(i18n.get('header')),
            }

        language.select(
            change_lang,
            inputs=[language],
            outputs=configure_updated_outputs + [
                configure_button, create_chat_input, open_api_accordion,
                preview_header, preview_chat_input, publish_accordion,
                upload_button, header
            ])

        def init_all(uuid_str, _state):
            uuid_str = check_uuid(uuid_str)
            builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration(
                uuid_str)
            ret = init_ui_config(uuid_str, _state, builder_cfg, model_cfg,
                                 tool_cfg)
            yield ret
            init_user(uuid_str, _state)
            init_builder(uuid_str, _state)
            yield {
                state:
                _state,
                preview_send_button:
                gr.Button.update(value=i18n.get('send'), interactive=True),
                create_send_button:
                gr.Button.update(value=i18n.get('send'), interactive=True),
            }

        demo.load(
            init_all, inputs=[uuid_str, state], outputs=configure_updated_outputs)

demo.queue(concurrency_count=10)
demo.launch(share=True, show_error=True)
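Aside: the comment in `upload_file` says it converts `xxx.json` to `xxx_uuid_str.json`, but `str.replace` rewrites every dot, not just the one before the extension. A minimal sketch of that rename step (the sample file names are hypothetical):

# Sketch of the rename used by upload_file above; file names are made up.
uuid_str = 'u42'
for name in ['data.csv', 'report.final.pdf']:
    print(name.replace('.', f'_{uuid_str}.'))
# data.csv         -> data_u42.csv
# report.final.pdf -> report_u42.final_u42.pdf  (every dot gets the uuid)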
agentfabric/appBot.py
ADDED
@@ -0,0 +1,169 @@
import os
import random
import shutil
import sys
import traceback

import gradio as gr
from config_utils import get_avatar_image, get_ci_dir, parse_configuration
from gradio_utils import ChatBot, format_cover_html
from user_core import init_user_chatbot_agent

uuid_str = 'local_user'
builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration(
    uuid_str)
suggests = builder_cfg.get('prompt_recommend', [])
avatar_pairs = get_avatar_image(builder_cfg.get('avatar', ''), uuid_str)

customTheme = gr.themes.Default(
    primary_hue=gr.themes.utils.colors.blue,
    radius_size=gr.themes.utils.sizes.radius_none,
)


def check_uuid(uuid_str):
    if not uuid_str or uuid_str == '':
        if os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio':
            raise gr.Error('请登陆后使用! (Please login first)')
        else:
            uuid_str = 'local_user'
    return uuid_str


def init_user(state):
    try:
        seed = state.get('session_seed', random.randint(0, 1000000000))
        user_agent = init_user_chatbot_agent(uuid_str)
        user_agent.seed = seed
        state['user_agent'] = user_agent
    except Exception as e:
        error = traceback.format_exc()
        print(f'Error:{e}, with detail: {error}')
    return state


# Create the Gradio interface
demo = gr.Blocks(css='assets/appBot.css', theme=customTheme)
with demo:
    gr.Markdown(
        '# <center> \N{fire} AgentFabric powered by Modelscope-agent ([github star](https://github.com/modelscope/modelscope-agent/tree/main))</center>'  # noqa E501
    )
    draw_seed = random.randint(0, 1000000000)
    state = gr.State({'session_seed': draw_seed})
    with gr.Row(elem_classes='container'):
        with gr.Column(scale=4):
            with gr.Column():
                # Preview
                user_chatbot = ChatBot(
                    value=[[None, '尝试问我一点什么吧~']],
                    elem_id='user_chatbot',
                    elem_classes=['markdown-body'],
                    avatar_images=avatar_pairs,
                    height=600,
                    latex_delimiters=[],
                    show_label=False)
            with gr.Row():
                with gr.Column(scale=12):
                    preview_chat_input = gr.Textbox(
                        show_label=False,
                        container=False,
                        placeholder='跟我聊聊吧~')
                with gr.Column(min_width=70, scale=1):
                    upload_button = gr.UploadButton(
                        '上传',
                        file_types=[
                            '.csv', '.doc', '.docx', '.xls', '.xlsx', '.txt',
                            '.md', '.pdf', '.jpeg', '.png', '.jpg', '.gif'
                        ],
                        file_count='multiple')
                with gr.Column(min_width=70, scale=1):
                    preview_send_button = gr.Button('发送', variant='primary')

        with gr.Column(scale=1):
            user_chat_bot_cover = gr.HTML(
                format_cover_html(builder_cfg, avatar_pairs[1]))
            user_chat_bot_suggest = gr.Examples(
                label='Prompt Suggestions',
                examples=suggests,
                inputs=[preview_chat_input])

    def upload_file(chatbot, upload_button, _state):
        _uuid_str = check_uuid(uuid_str)
        new_file_paths = []
        if 'file_paths' in _state:
            file_paths = _state['file_paths']
        else:
            file_paths = []
        for file in upload_button:
            file_name = os.path.basename(file.name)
            # convert xxx.json to xxx_uuid_str.json
            file_name = file_name.replace('.', f'_{_uuid_str}.')
            file_path = os.path.join(get_ci_dir(), file_name)
            if not os.path.exists(file_path):
                # make sure the file path's directory exists
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
                shutil.copy(file.name, file_path)
            file_paths.append(file_path)
            new_file_paths.append(file_path)
            chatbot.append((None, f'上传文件{file_name},成功'))
        yield {
            user_chatbot: gr.Chatbot.update(visible=True, value=chatbot),
            preview_chat_input: gr.Textbox.update(value='')
        }

        _state['file_paths'] = file_paths
        _state['new_file_paths'] = new_file_paths

    upload_button.upload(
        upload_file,
        inputs=[user_chatbot, upload_button, state],
        outputs=[user_chatbot, preview_chat_input])

    def send_message(chatbot, input, _state):
        # append the outgoing message to the chat history
        user_agent = _state['user_agent']
        if 'new_file_paths' in _state:
            new_file_paths = _state['new_file_paths']
        else:
            new_file_paths = []
        _state['new_file_paths'] = []
        chatbot.append((input, ''))
        yield {
            user_chatbot: chatbot,
            preview_chat_input: gr.Textbox.update(value=''),
        }

        response = ''

        for frame in user_agent.stream_run(
                input, print_info=True, remote=False,
                append_files=new_file_paths):
            # is_final = frame.get("frame_is_final")
            llm_result = frame.get('llm_text', '')
            exec_result = frame.get('exec_result', '')
            # llm_result = llm_result.split("<|user|>")[0].strip()
            if len(exec_result) != 0:
                # action_exec_result
                if isinstance(exec_result, dict):
                    exec_result = str(exec_result['result'])
                frame_text = f'<result>{exec_result}</result>'
            else:
                # llm result
                frame_text = llm_result

            # important! do not change this
            response += frame_text
            chatbot[-1] = (input, response)
            yield {
                user_chatbot: chatbot,
            }

    preview_send_button.click(
        send_message,
        inputs=[user_chatbot, preview_chat_input, state],
        outputs=[user_chatbot, preview_chat_input])

    demo.load(init_user, inputs=[state], outputs=[state])

demo.queue()
demo.launch()
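Aside: `send_message` treats each frame from `user_agent.stream_run` as either plain LLM text (`llm_text`) or a tool result (`exec_result`) and appends the rendered piece to one growing response string. A minimal sketch of that accumulation loop, with a hypothetical stub generator standing in for the agent:

def fake_stream_run():
    # hypothetical stand-in for user_agent.stream_run(...)
    yield {'llm_text': 'The weather tool says: '}
    yield {'exec_result': {'result': 'sunny, 25C'}}

response = ''
for frame in fake_stream_run():
    llm_result = frame.get('llm_text', '')
    exec_result = frame.get('exec_result', '')
    if len(exec_result) != 0:
        if isinstance(exec_result, dict):
            exec_result = str(exec_result['result'])
        frame_text = f'<result>{exec_result}</result>'
    else:
        frame_text = llm_result
    response += frame_text
print(response)  # The weather tool says: <result>sunny, 25C</result>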
agentfabric/assets/app.css
ADDED
@@ -0,0 +1,147 @@
/* code highlight: https://python-markdown.github.io/extensions/code_hilite/ */
.codehilite .hll { background-color: #ffffcc }
.codehilite { background: #f8f8f8; }
.codehilite .c { color: #408080; font-style: italic } /* Comment */
.codehilite .err { border: 1px solid #FF0000 } /* Error */
.codehilite .k { color: #008000; font-weight: bold } /* Keyword */
.codehilite .o { color: #666666 } /* Operator */
.codehilite .ch { color: #408080; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #408080; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #BC7A00 } /* Comment.Preproc */
.codehilite .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #408080; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #408080; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #A00000 } /* Generic.Deleted */
.codehilite .ge { font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #FF0000 } /* Generic.Error */
.codehilite .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #00A000 } /* Generic.Inserted */
.codehilite .go { color: #888888 } /* Generic.Output */
.codehilite .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
.codehilite .gs { font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.codehilite .gt { color: #0044DD } /* Generic.Traceback */
.codehilite .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
.codehilite .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
.codehilite .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
.codehilite .kp { color: #008000 } /* Keyword.Pseudo */
.codehilite .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
.codehilite .kt { color: #B00040 } /* Keyword.Type */
.codehilite .m { color: #666666 } /* Literal.Number */
.codehilite .s { color: #BA2121 } /* Literal.String */
.codehilite .na { color: #7D9029 } /* Name.Attribute */
.codehilite .nb { color: #008000 } /* Name.Builtin */
.codehilite .nc { color: #0000FF; font-weight: bold } /* Name.Class */
.codehilite .no { color: #880000 } /* Name.Constant */
.codehilite .nd { color: #AA22FF } /* Name.Decorator */
.codehilite .ni { color: #999999; font-weight: bold } /* Name.Entity */
.codehilite .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #0000FF } /* Name.Function */
.codehilite .nl { color: #A0A000 } /* Name.Label */
.codehilite .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
.codehilite .nt { color: #008000; font-weight: bold } /* Name.Tag */
.codehilite .nv { color: #19177C } /* Name.Variable */
.codehilite .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
.codehilite .w { color: #bbbbbb } /* Text.Whitespace */
.codehilite .mb { color: #666666 } /* Literal.Number.Bin */
.codehilite .mf { color: #666666 } /* Literal.Number.Float */
.codehilite .mh { color: #666666 } /* Literal.Number.Hex */
.codehilite .mi { color: #666666 } /* Literal.Number.Integer */
.codehilite .mo { color: #666666 } /* Literal.Number.Oct */
.codehilite .sa { color: #BA2121 } /* Literal.String.Affix */
.codehilite .sb { color: #BA2121 } /* Literal.String.Backtick */
.codehilite .sc { color: #BA2121 } /* Literal.String.Char */
.codehilite .dl { color: #BA2121 } /* Literal.String.Delimiter */
.codehilite .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
.codehilite .s2 { color: #BA2121 } /* Literal.String.Double */
.codehilite .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
.codehilite .sh { color: #BA2121 } /* Literal.String.Heredoc */
.codehilite .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
.codehilite .sx { color: #008000 } /* Literal.String.Other */
.codehilite .sr { color: #BB6688 } /* Literal.String.Regex */
.codehilite .s1 { color: #BA2121 } /* Literal.String.Single */
.codehilite .ss { color: #19177C } /* Literal.String.Symbol */
.codehilite .bp { color: #008000 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #0000FF } /* Name.Function.Magic */
.codehilite .vc { color: #19177C } /* Name.Variable.Class */
.codehilite .vg { color: #19177C } /* Name.Variable.Global */
.codehilite .vi { color: #19177C } /* Name.Variable.Instance */
.codehilite .vm { color: #19177C } /* Name.Variable.Magic */
.codehilite .il { color: #666666 } /* Literal.Number.Integer.Long */

.preview_header {
    font-size: 18px;
    font-weight: 500;
    text-align: center;
    margin-bottom: -12px;
}

.bot_cover {
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
    min-height: 650px;
    border: 1px solid rgb(229, 231, 235);
    border-radius: 8px;
    padding: 20px 40px;
}

.bot_avatar {
    width: 100px;
    height: 100px;
    border-radius: 50%;
    overflow: hidden;
}

.bot_avatar img {
    width: 100px;
    height: 100px;
}

.bot_name {
    font-size: 36px;
    margin-top: 10px;
}

.bot_desp {
    color: #ddd;
}

.publish_link_container > a {
    display: block;
    border-radius: var(--button-large-radius);
    padding: var(--button-large-padding);
    font-weight: var(--button-large-text-weight);
    font-size: var(--button-large-text-size);
    border: var(--button-border-width) solid var(--button-secondary-border-color);
    background: var(--button-secondary-background-fill);
    color: var(--button-secondary-text-color) !important;
    cursor: pointer;
    text-decoration: none !important;
    text-align: center;
}

.publish_link_container > .disabled {
    cursor: not-allowed;
    opacity: .5;
    filter: grayscale(30%);
}

.markdown-body .message {
    white-space: pre-wrap;
}

.markdown-body details {
    white-space: nowrap;
}
.markdown-body .bot details:not(:last-child) {
    margin-bottom: 1px;
}
.markdown-body summary {
    background-color: #4b5563;
    color: #eee;
    padding: 0 4px;
    border-radius: 4px;
    font-size: 0.9em;
}
agentfabric/assets/appBot.css
ADDED
@@ -0,0 +1,129 @@
/* code highlight: https://python-markdown.github.io/extensions/code_hilite/ */
.codehilite .hll { background-color: #ffffcc }
.codehilite { background: #f8f8f8; }
.codehilite .c { color: #408080; font-style: italic } /* Comment */
.codehilite .err { border: 1px solid #FF0000 } /* Error */
.codehilite .k { color: #008000; font-weight: bold } /* Keyword */
.codehilite .o { color: #666666 } /* Operator */
.codehilite .ch { color: #408080; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #408080; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #BC7A00 } /* Comment.Preproc */
.codehilite .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #408080; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #408080; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #A00000 } /* Generic.Deleted */
.codehilite .ge { font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #FF0000 } /* Generic.Error */
.codehilite .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #00A000 } /* Generic.Inserted */
.codehilite .go { color: #888888 } /* Generic.Output */
.codehilite .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
.codehilite .gs { font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.codehilite .gt { color: #0044DD } /* Generic.Traceback */
.codehilite .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
.codehilite .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
.codehilite .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
.codehilite .kp { color: #008000 } /* Keyword.Pseudo */
.codehilite .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
.codehilite .kt { color: #B00040 } /* Keyword.Type */
.codehilite .m { color: #666666 } /* Literal.Number */
.codehilite .s { color: #BA2121 } /* Literal.String */
.codehilite .na { color: #7D9029 } /* Name.Attribute */
.codehilite .nb { color: #008000 } /* Name.Builtin */
.codehilite .nc { color: #0000FF; font-weight: bold } /* Name.Class */
.codehilite .no { color: #880000 } /* Name.Constant */
.codehilite .nd { color: #AA22FF } /* Name.Decorator */
.codehilite .ni { color: #999999; font-weight: bold } /* Name.Entity */
.codehilite .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #0000FF } /* Name.Function */
.codehilite .nl { color: #A0A000 } /* Name.Label */
.codehilite .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
.codehilite .nt { color: #008000; font-weight: bold } /* Name.Tag */
.codehilite .nv { color: #19177C } /* Name.Variable */
.codehilite .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
.codehilite .w { color: #bbbbbb } /* Text.Whitespace */
.codehilite .mb { color: #666666 } /* Literal.Number.Bin */
.codehilite .mf { color: #666666 } /* Literal.Number.Float */
.codehilite .mh { color: #666666 } /* Literal.Number.Hex */
.codehilite .mi { color: #666666 } /* Literal.Number.Integer */
.codehilite .mo { color: #666666 } /* Literal.Number.Oct */
.codehilite .sa { color: #BA2121 } /* Literal.String.Affix */
.codehilite .sb { color: #BA2121 } /* Literal.String.Backtick */
.codehilite .sc { color: #BA2121 } /* Literal.String.Char */
.codehilite .dl { color: #BA2121 } /* Literal.String.Delimiter */
.codehilite .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
.codehilite .s2 { color: #BA2121 } /* Literal.String.Double */
.codehilite .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
.codehilite .sh { color: #BA2121 } /* Literal.String.Heredoc */
.codehilite .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
.codehilite .sx { color: #008000 } /* Literal.String.Other */
.codehilite .sr { color: #BB6688 } /* Literal.String.Regex */
.codehilite .s1 { color: #BA2121 } /* Literal.String.Single */
.codehilite .ss { color: #19177C } /* Literal.String.Symbol */
.codehilite .bp { color: #008000 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #0000FF } /* Name.Function.Magic */
.codehilite .vc { color: #19177C } /* Name.Variable.Class */
.codehilite .vg { color: #19177C } /* Name.Variable.Global */
.codehilite .vi { color: #19177C } /* Name.Variable.Instance */
.codehilite .vm { color: #19177C } /* Name.Variable.Magic */
.codehilite .il { color: #666666 } /* Literal.Number.Integer.Long */

.preview_header {
    font-size: 24px;
    font-weight: 500;
    text-align: center;
}

.bot_cover {
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
    min-height: 300px;
    border: 1px solid rgb(229, 231, 235);
    padding: 20px 20px;
}

.bot_avatar {
    width: 100px;
    height: 100px;
    border-radius: 50%;
    overflow: hidden;
}

.bot_avatar img {
    width: 100px;
    height: 100px;
}

.bot_name {
    font-size: 36px;
    margin-top: 10px;
}

.bot_desp {
    color: #ddd;
}

.container {
    flex-direction: row-reverse;
}

.markdown-body .message {
    white-space: pre-wrap;
}

.markdown-body details {
    white-space: nowrap;
}
.markdown-body .bot details:not(:last-child) {
    margin-bottom: 1px;
}
.markdown-body summary {
    background-color: #4b5563;
    color: #eee;
    padding: 0 4px;
    border-radius: 4px;
    font-size: 0.9em;
}
agentfabric/assets/bot.jpg
ADDED
agentfabric/assets/user.jpg
ADDED
agentfabric/builder_core.py
ADDED
@@ -0,0 +1,276 @@
# flake8: noqa E501
import re
import traceback
from typing import Dict

import json
from config_utils import parse_configuration
from help_tools import LogoGeneratorTool, config_conversion
from modelscope_agent.agent import AgentExecutor
from modelscope_agent.agent_types import AgentType
from modelscope_agent.llm import LLMFactory
from modelscope_agent.prompt import MessagesGenerator

SYSTEM = 'You are a helpful assistant.'

PROMPT_CUSTOM = """你现在要扮演一个制造AI角色(AI-Agent)的AI助手(QwenBuilder)。
你需要和用户进行对话,明确用户对AI-Agent的要求。并根据已有信息和你的联想能力,尽可能填充完整的配置文件:

配置文件为json格式:
{"name": "... # AI-Agent的名字", "description": "... # 对AI-Agent的要求,简单描述", "instructions": "... # 分点描述对AI-Agent的具体功能要求,尽量详细一些,类型是一个字符串数组,起始为[]", "prompt_recommend": "... # 推荐的用户将对AI-Agent说的指令,用于指导用户使用AI-Agent,类型是一个字符串数组,请尽可能补充4句左右,起始为["你可以做什么?"]", "logo_prompt": "... # 画AI-Agent的logo的指令,不需要画logo或不需要更新logo时可以为空,类型是string"}

在接下来的对话中,请在回答时严格使用如下格式,先作出回复,再生成配置文件,不要回复其他任何内容:
Answer: ... # 你希望对用户说的话,用于询问用户对AI-Agent的要求,不要重复确认用户已经提出的要求,而应该拓展出新的角度来询问用户,尽量细节和丰富,禁止为空
Config: ... # 生成的配置文件,严格按照以上json格式
RichConfig: ... # 格式和核心内容和Config相同,但是保证name和description不为空;instructions需要在Config的基础上扩充字数,使指令更加详尽,如果用户给出了详细指令,请完全保留;补充prompt_recommend,并保证prompt_recommend是推荐的用户将对AI-Agent说的指令。请注意从用户的视角来描述prompt_recommend、description和instructions。

一个优秀的RichConfig样例如下:
{"name": "小红书文案生成助手", "description": "一个专为小红书用户设计的文案生成助手。", "instructions": "1. 理解并回应用户的指令;2. 根据用户的需求生成高质量的小红书风格文案;3. 使用表情提升文本丰富度", "prompt_recommend": ["你可以帮我生成一段关于旅行的文案吗?", "你会写什么样的文案?", "可以推荐一个小红书文案模版吗?"], "logo_prompt": "一个写作助手logo,包含一只羽毛钢笔"}


明白了请说“好的。”, 不要说其他的。"""

LOGO_TOOL_NAME = 'logo_designer'

ANSWER = 'Answer'
CONFIG = 'Config'
ASSISTANT_PROMPT = """{}: <answer>\n{}: <config>\nRichConfig: <rich_config>""".format(
    ANSWER, CONFIG)

UPDATING_CONFIG_STEP = '🚀Updating Config...'
CONFIG_UPDATED_STEP = '✅Config Updated!'
UPDATING_LOGO_STEP = '🚀Updating Logo...'
LOGO_UPDATED_STEP = '✅Logo Updated!'


def init_builder_chatbot_agent(uuid_str):
    # build model
    builder_cfg, model_cfg, _, _, _, _ = parse_configuration(uuid_str)

    # additional tool
    additional_tool_list = {LOGO_TOOL_NAME: LogoGeneratorTool()}
    tool_cfg = {LOGO_TOOL_NAME: {'is_remote_tool': True}}

    # build llm
    print(f'using builder model {builder_cfg.model}')
    llm = LLMFactory.build_llm(builder_cfg.model, model_cfg)
    llm.set_agent_type(AgentType.Messages)

    # build prompt
    starter_messages = [{
        'role': 'system',
        'content': SYSTEM
    }, {
        'role': 'user',
        'content': PROMPT_CUSTOM
    }, {
        'role': 'assistant',
        'content': '好的。'
    }]

    # prompt generator
    prompt_generator = MessagesGenerator(
        system_template=SYSTEM, custom_starter_messages=starter_messages)

    # build agent
    agent = BuilderChatbotAgent(
        llm,
        tool_cfg,
        agent_type=AgentType.Messages,
        prompt_generator=prompt_generator,
        additional_tool_list=additional_tool_list)
    agent.set_available_tools([LOGO_TOOL_NAME])
    return agent


class BuilderChatbotAgent(AgentExecutor):

    def __init__(self, llm, tool_cfg, agent_type, prompt_generator,
                 additional_tool_list):

        super().__init__(
            llm,
            tool_cfg,
            agent_type=agent_type,
            additional_tool_list=additional_tool_list,
            prompt_generator=prompt_generator,
            tool_retrieval=False)

        # used to reconstruct assistant message when builder config is updated
        self._last_assistant_structured_response = {}

    def stream_run(self,
                   task: str,
                   remote: bool = True,
                   print_info: bool = False,
                   uuid_str: str = '') -> Dict:

        # retrieve tools
        tool_list = self.retrieve_tools(task)
        self.prompt_generator.init_prompt(task, tool_list, [])
        function_list = []

        llm_result, exec_result = '', ''

        idx = 0

        while True:
            idx += 1
            llm_artifacts = self.prompt_generator.generate(
                llm_result, exec_result)
            if print_info:
                print(f'|LLM inputs in round {idx}:\n{llm_artifacts}')

            llm_result = ''
            try:
                parser_obj = AnswerParser()
                for s in self.llm.stream_generate(llm_artifacts=llm_artifacts):
                    llm_result += s
                    answer, finish = parser_obj.parse_answer(llm_result)
                    if answer == '':
                        continue
                    result = {'llm_text': answer}
                    if finish:
                        result.update({'step': UPDATING_CONFIG_STEP})
                    yield result

                if print_info:
                    print(f'|LLM output in round {idx}:\n{llm_result}')
            except Exception as e:
                yield {'error': 'llm result is not valid'}

            try:
                re_pattern_config = re.compile(
                    pattern=r'Config: ([\s\S]+)\nRichConfig')
                res = re_pattern_config.search(llm_result)
                if res is None:
                    return
                config = res.group(1).strip()
                self._last_assistant_structured_response['config_str'] = config

                rich_config = llm_result[llm_result.rfind('RichConfig:')
                                         + len('RichConfig:'):].strip()
                try:
                    answer = json.loads(rich_config)
                except Exception:
                    print('parse RichConfig error')
                    return
                self._last_assistant_structured_response[
                    'rich_config_dict'] = answer
                builder_cfg = config_conversion(answer, uuid_str=uuid_str)
                yield {'exec_result': {'result': builder_cfg}}
                yield {'step': CONFIG_UPDATED_STEP}
            except ValueError as e:
                print(e)
                yield {'error': 'content=[{}]'.format(llm_result)}
                return

            # record the llm_result result
            _ = self.prompt_generator.generate(
                {
                    'role': 'assistant',
                    'content': llm_result
                }, '')

            messages = self.prompt_generator.history
            if 'logo_prompt' in answer and len(messages) > 4 and (
                    answer['logo_prompt'] not in messages[-3]['content']):
                # draw logo
                yield {'step': UPDATING_LOGO_STEP}
                params = {
                    'user_requirement': answer['logo_prompt'],
                    'uuid_str': uuid_str
                }

                tool = self.tool_list[LOGO_TOOL_NAME]
                try:
                    exec_result = tool(**params, remote=remote)
                    yield {'exec_result': exec_result}
                    yield {'step': LOGO_UPDATED_STEP}

                    return
                except Exception as e:
                    exec_result = f'Action call error: {LOGO_TOOL_NAME}: {params}. \n Error message: {e}'
                    yield {'error': exec_result}
                    self.prompt_generator.reset()
                    return
            else:
                return

    def update_config_to_history(self, config: Dict):
        """ update builder config to message when user modify configuration

        Args:
            config info read from builder config file
        """
        if len(
                self.prompt_generator.history
        ) > 0 and self.prompt_generator.history[-1]['role'] == 'assistant':
            answer = self._last_assistant_structured_response['answer_str']
            simple_config = self._last_assistant_structured_response[
                'config_str']

            rich_config_dict = {
                k: config[k]
                for k in ['name', 'description', 'prompt_recommend']
            }
            rich_config_dict[
                'logo_prompt'] = self._last_assistant_structured_response[
                    'rich_config_dict']['logo_prompt']
            rich_config_dict['instructions'] = config['instruction'].split(';')

            rich_config = json.dumps(rich_config_dict, ensure_ascii=False)
            new_content = ASSISTANT_PROMPT.replace('<answer>', answer).replace(
                '<config>', simple_config).replace('<rich_config>',
                                                   rich_config)
            self.prompt_generator.history[-1]['content'] = new_content


def beauty_output(response: str, step_result: str):
    flag_list = [
        CONFIG_UPDATED_STEP, UPDATING_CONFIG_STEP, LOGO_UPDATED_STEP,
        UPDATING_LOGO_STEP
    ]

    if step_result in flag_list:
        end_str = ''
        for item in flag_list:
            if response.endswith(item):
                end_str = item
        if end_str == '':
            response = f'{response}\n{step_result}'
        elif end_str in [CONFIG_UPDATED_STEP, LOGO_UPDATED_STEP]:
            response = f'{response}\n{step_result}'
        else:
            response = response[:-len('\n' + end_str)]
            response = f'{response}\n{step_result}'

    return response


class AnswerParser(object):

    def __init__(self):
        self._history = ''

    def parse_answer(self, llm_result: str):
        finish = False
        answer_prompt = ANSWER + ': '

        if len(llm_result) >= len(answer_prompt):
            start_pos = llm_result.find(answer_prompt)
            end_pos = llm_result.find(f'\n{CONFIG}')
            if start_pos >= 0:
                if end_pos > start_pos:
                    result = llm_result[start_pos + len(answer_prompt):end_pos]
                    finish = True
                else:
                    result = llm_result[start_pos + len(answer_prompt):]
            else:
                result = llm_result
        else:
            result = ''

        new_result = result[len(self._history):]
        self._history = result
        return new_result, finish
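Aside: `AnswerParser.parse_answer` is fed the cumulative LLM output on every streamed chunk and returns only the newly revealed part of the `Answer:` section, flipping `finish` once a `\nConfig` line appears. A small self-contained demo of that incremental behavior (the parser is the class above, the streamed chunks are made up):

ANSWER, CONFIG = 'Answer', 'Config'

class AnswerParser(object):
    def __init__(self):
        self._history = ''

    def parse_answer(self, llm_result):
        finish = False
        answer_prompt = ANSWER + ': '
        if len(llm_result) >= len(answer_prompt):
            start_pos = llm_result.find(answer_prompt)
            end_pos = llm_result.find(f'\n{CONFIG}')
            if start_pos >= 0:
                if end_pos > start_pos:
                    result = llm_result[start_pos + len(answer_prompt):end_pos]
                    finish = True
                else:
                    result = llm_result[start_pos + len(answer_prompt):]
            else:
                result = llm_result
        else:
            result = ''
        new_result = result[len(self._history):]
        self._history = result
        return new_result, finish

parser, stream = AnswerParser(), ''
for chunk in ['Answer: Hel', 'lo there', '\nConfig: {}']:  # made-up chunks
    stream += chunk
    print(parser.parse_answer(stream))
# ('Hel', False) -> ('lo there', False) -> ('', True)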
agentfabric/config/builder_config.json
ADDED
@@ -0,0 +1,26 @@
{
  "name": "",
  "avatar": "custom_bot_avatar.png",
  "description": "",
  "instruction": "",
  "prompt_recommend": [
    "你可以做什么?",
    "你有什么功能?",
    "如何使用你的功能?",
    "能否给我一些示例指令?"
  ],
  "knowledge": [],
  "tools": {
    "image_gen": {
      "name": "Wanx Image Generation",
      "is_active": true,
      "use": true
    },
    "code_interpreter": {
      "name": "Code Interpreter",
      "is_active": true,
      "use": false
    }
  },
  "model": "qwen-max"
}
agentfabric/config/builder_config_ci.json
ADDED
@@ -0,0 +1,31 @@
{
  "name": "Python数据分析师",
  "avatar": "image.png",
  "description": "使用python解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑的文件。",
  "instruction": "1. 你会数学解题;\n2. 你会数据分析和可视化;\n3. 用户上传文件时,你必须先了解文件结构再进行下一步操作;如果没有上传文件但要求画图,则编造示例数据画图\n4. 调用工具前你需要说明理由;Think step by step\n5. 代码出错时你需要反思并改进",
  "prompt_recommend": [
    "制作示例饼图来报告某网站流量来源。",
    "鸡兔同笼 32头 88腿 多少兔",
    "帮我把这个链接“https://modelscope.cn/my/overview”网址,转成二维码,并展示图片",
    "一支钢笔5元,一支铅笔3元,一个文具盒10元,一套文具包括2支钢笔,3支铅笔,1个文具盒,一共多少钱?"
  ],
  "knowledge": [],
  "tools": {
    "image_gen": {
      "name": "Wanx Image Generation",
      "is_active": true,
      "use": false
    },
    "code_interpreter": {
      "name": "Code Interpreter",
      "is_active": true,
      "use": true
    },
    "amap_weather": {
      "name": "高德天气",
      "is_active": true,
      "use": false
    }
  },
  "model": "qwen-max"
}
agentfabric/config/builder_config_template.json
ADDED
@@ -0,0 +1,26 @@
{
  "name": "AI-Agent",
  "avatar": "logo.png",
  "description": "我希望AI-Agent能够像多啦A梦一样,拥有各种神奇的技能和能力,可以帮我解决生活中的各种问题。",
  "instruction": "请告诉我你想要什么帮助,我会尽力提供解决方案。;如果你有任何问题,请随时向我提问,我会尽我所能回答你的问题。;我可以帮你查找信息、提供建议、提醒日程等,只需要你告诉我你需要什么。",
  "prompt_recommend": [
    "你好,我是AI-Agent,有什么可以帮助你的吗?",
    "嗨,很高兴见到你,我是AI-Agent,你可以问我任何问题。",
    "你好,我是AI-Agent,需要我帮你做些什么吗?",
    "嗨,我是AI-Agent,有什么我可以帮到你的吗?"
  ],
  "knowledge": [],
  "tools": {
    "image_gen": {
      "name": "Wanx Image Generation",
      "is_active": true,
      "use": true
    },
    "code_interpreter": {
      "name": "Code Interpreter",
      "is_active": true,
      "use": false
    }
  },
  "model": "qwen-max"
}
agentfabric/config/builder_config_wuxia.json
ADDED
@@ -0,0 +1,24 @@
{
  "name": "武侠小说家",
  "avatar": "custom_bot_avatar.png",
  "description": "能够生成武侠小说并配图",
  "instruction": "你的指令是为我提供一个基于金庸武侠小说世界的在线RPG游戏体验。在这个游戏中,玩家将扮演金庸故事中的一个关键角色,游戏情景将基于他的小说。这个游戏的玩法是互动式的,并遵循以下特定格式:\n\n<场景描述>:根据玩家的选择,故事情节将按照金庸小说的线索发展。你将描述角色所处的环境和情况。\n\n<场景图片>:对于每个场景,你将创造一个概括该情况的图像。这些图像的风格将类似于1980年代RPG游戏,大小是16:9宽屏比例。在这个步骤你需要调用画图工具,绘制<场景描述>。\n\n<选择>:在每次互动中,你将为玩家提供三个行动选项,分别标为A、B、C,以及第四个选项“D: 输入玩家的选择”。故事情节将根据玩家选择的行动进展。如果一个选择不是直接来自小说,你将创造性地适应故事,最终引导它回归原始情节。\n\n整个故事将围绕金庸小说中丰富而复杂的世界展开。每次互动必须包括<场景描述>、<场景图片>和<选择>。所有内容将以繁体中文呈现。你的重点将仅仅放在提供场景描述,场景图片和选择上,不包含其他游戏指导。场景尽量不要重复,要丰富一些。",
  "prompt_recommend": [
    "扮演小龙女",
    "扮演杨过"
  ],
  "knowledge": [],
  "tools": {
    "image_gen": {
      "name": "Wanx Image Generation",
      "is_active": true,
      "use": true
    },
    "code_interpreter": {
      "name": "Code Interpreter",
      "is_active": true,
      "use": false
    }
  },
  "model": "qwen-max"
}
agentfabric/config/custom_bot_avatar.png
ADDED
agentfabric/config/model_config.json
ADDED
@@ -0,0 +1,77 @@
{
  "qwen-turbo": {
    "type": "dashscope",
    "model": "qwen-turbo",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8
    }
  },
  "qwen-plus": {
    "type": "dashscope",
    "model": "qwen-plus",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8
    }
  },
  "qwen-max": {
    "type": "dashscope",
    "model": "qwen-max",
    "length_constraint": {
      "knowledge": 4000,
      "input": 6000
    },
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8
    }
  },
  "qwen-7b": {
    "type": "modelscope",
    "model_id": "qwen/Qwen-7B-Chat",
    "model_revision": "v1.1.8",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8,
      "max_length": 2000
    }
  },
  "qwen-7b-api": {
    "type": "dashscope",
    "model": "qwen-7b-chat",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8,
      "debug": false
    }
  },
  "qwen-14b": {
    "type": "modelscope",
    "model_id": "qwen/Qwen-14B-Chat",
    "model_revision": "v1.0.8",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8,
      "max_length": 2000
    }
  },
  "qwen-14b-api": {
    "type": "dashscope",
    "model": "qwen-14b-chat",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8,
      "debug": false
    }
  },
  "qwen-72b-api": {
    "type": "dashscope",
    "model": "qwen-72b-chat",
    "generate_cfg": {
      "use_raw_prompt": true,
      "top_p": 0.8,
      "debug": false
    }
  }
}
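Aside: each top-level key in model_config.json is a selectable model name; builder_core.py above passes the chosen name plus this whole map to `LLMFactory.build_llm`. A minimal sketch of the lookup side, using plain `json` instead of the modelscope Config loader (the path matches DEFAULT_MODEL_CONFIG_FILE in config_utils.py below):

import json

# Illustrative only: resolve one model entry by its key; the real factory
# lives in modelscope_agent.llm.LLMFactory.
with open('./config/model_config.json', encoding='utf-8') as f:
    model_cfg = json.load(f)

entry = model_cfg['qwen-max']
print(entry['type'])                       # dashscope
print(entry['generate_cfg']['top_p'])      # 0.8
print(entry.get('length_constraint', {}))  # {'knowledge': 4000, 'input': 6000}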
agentfabric/config/tool_config.json
ADDED
@@ -0,0 +1,35 @@
{
  "image_gen": {
    "name": "Wanx Image Generation",
    "is_active": true,
    "use": true,
    "is_remote_tool": true
  },
  "code_interpreter": {
    "name": "Code Interpreter",
    "is_active": true,
    "use": false,
    "is_remote_tool": false,
    "max_output": 2000
  },
  "web_browser": {
    "name": "Web Browsing",
    "is_active": false,
    "use": false
  },
  "amap_weather": {
    "name": "高德天气",
    "is_active": true,
    "use": false
  },
  "wordart_texture_generation": {
    "name": "艺术字纹理生成",
    "is_active": true,
    "use": false
  },
  "web_search": {
    "name": "Web Searching",
    "is_active": false,
    "use": false
  }
}
agentfabric/config_utils.py
ADDED
@@ -0,0 +1,170 @@
import os
import shutil
import traceback

import json
from modelscope_agent.tools.openapi_plugin import (OpenAPIPluginTool,
                                                   openapi_schema_convert)

from modelscope.utils.config import Config

DEFAULT_AGENT_DIR = '/tmp/agentfabric'
DEFAULT_BUILDER_CONFIG_DIR = os.path.join(DEFAULT_AGENT_DIR, 'config')
DEFAULT_BUILDER_CONFIG_FILE = os.path.join(DEFAULT_BUILDER_CONFIG_DIR,
                                           'builder_config.json')
DEFAULT_OPENAPI_PLUGIN_CONFIG_FILE = os.path.join(
    DEFAULT_BUILDER_CONFIG_DIR, 'openapi_plugin_config.json')
DEFAULT_MODEL_CONFIG_FILE = './config/model_config.json'
DEFAULT_TOOL_CONFIG_FILE = './config/tool_config.json'
DEFAULT_CODE_INTERPRETER_DIR = os.getenv('CODE_INTERPRETER_WORK_DIR',
                                         '/tmp/ci_workspace')


def get_user_dir(uuid_str=''):
    return os.path.join(DEFAULT_BUILDER_CONFIG_DIR, uuid_str)


def get_ci_dir():
    return DEFAULT_CODE_INTERPRETER_DIR


def get_user_cfg_file(uuid_str=''):
    builder_cfg_file = os.getenv('BUILDER_CONFIG_FILE',
                                 DEFAULT_BUILDER_CONFIG_FILE)
    # convert from ./config/builder_config.json to ./config/user/builder_config.json
    builder_cfg_file = builder_cfg_file.replace('config/', 'config/user/')

    # convert from ./config/user/builder_config.json to ./config/uuid/builder_config.json
    if uuid_str != '':
        builder_cfg_file = builder_cfg_file.replace('user', uuid_str)
    return builder_cfg_file


def get_user_openapi_plugin_cfg_file(uuid_str=''):
    openapi_plugin_cfg_file = os.getenv('OPENAPI_PLUGIN_CONFIG_FILE',
                                        DEFAULT_OPENAPI_PLUGIN_CONFIG_FILE)
    openapi_plugin_cfg_file = openapi_plugin_cfg_file.replace(
        'config/', 'config/user/')
    if uuid_str != '':
        openapi_plugin_cfg_file = openapi_plugin_cfg_file.replace(
            'user', uuid_str)
    return openapi_plugin_cfg_file


def save_builder_configuration(builder_cfg, uuid_str=''):
    builder_cfg_file = get_user_cfg_file(uuid_str)
    if uuid_str != '' and not os.path.exists(
            os.path.dirname(builder_cfg_file)):
        os.makedirs(os.path.dirname(builder_cfg_file))
    with open(builder_cfg_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(builder_cfg, indent=2, ensure_ascii=False))


def is_valid_plugin_configuration(openapi_plugin_cfg):
    if 'schema' in openapi_plugin_cfg:
        schema = openapi_plugin_cfg['schema']
        if isinstance(schema, dict):
            return True
    else:
        return False


def save_plugin_configuration(openapi_plugin_cfg, uuid_str):
    openapi_plugin_cfg_file = get_user_openapi_plugin_cfg_file(uuid_str)
    if uuid_str != '' and not os.path.exists(
            os.path.dirname(openapi_plugin_cfg_file)):
        os.makedirs(os.path.dirname(openapi_plugin_cfg_file))
    with open(openapi_plugin_cfg_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(openapi_plugin_cfg, indent=2, ensure_ascii=False))


def get_avatar_image(bot_avatar, uuid_str=''):
    user_avatar_path = os.path.join(
        os.path.dirname(__file__), 'assets/user.jpg')
    bot_avatar_path = os.path.join(os.path.dirname(__file__), 'assets/bot.jpg')
    if len(bot_avatar) > 0:
        bot_avatar_path = os.path.join(DEFAULT_BUILDER_CONFIG_DIR, uuid_str,
                                       bot_avatar)
        if uuid_str != '':
            # use default if not exists
            if not os.path.exists(bot_avatar_path):
                # create parents directory
                os.makedirs(os.path.dirname(bot_avatar_path), exist_ok=True)
                # copy the template to the address
                temp_bot_avatar_path = os.path.join(DEFAULT_BUILDER_CONFIG_DIR,
                                                    bot_avatar)
                if not os.path.exists(temp_bot_avatar_path):
                    # fall back to default local avatar image
                    temp_bot_avatar_path = os.path.join('./config', bot_avatar)
                    if not os.path.exists(temp_bot_avatar_path):
                        temp_bot_avatar_path = os.path.join(
                            './config', 'custom_bot_avatar.png')

                shutil.copy(temp_bot_avatar_path, bot_avatar_path)

    return [user_avatar_path, bot_avatar_path]


def save_avatar_image(image_path, uuid_str=''):
    bot_avatar = os.path.basename(image_path)
    bot_avatar_path = os.path.join(DEFAULT_BUILDER_CONFIG_DIR, uuid_str,
                                   bot_avatar)
    shutil.copy(image_path, bot_avatar_path)
    return bot_avatar, bot_avatar_path


def parse_configuration(uuid_str=''):
    """parse configuration

    Args:

    Returns:
        dict: parsed configuration

    """
    model_cfg_file = os.getenv('MODEL_CONFIG_FILE', DEFAULT_MODEL_CONFIG_FILE)

    builder_cfg_file = get_user_cfg_file(uuid_str)
    # use default if not exists
    if not os.path.exists(builder_cfg_file):
        # create parents directory
        os.makedirs(os.path.dirname(builder_cfg_file), exist_ok=True)
        # copy the template to the address
        builder_cfg_file_temp = './config/builder_config.json'

        if builder_cfg_file_temp != builder_cfg_file:
            shutil.copy(builder_cfg_file_temp, builder_cfg_file)

    tool_cfg_file = os.getenv('TOOL_CONFIG_FILE', DEFAULT_TOOL_CONFIG_FILE)

    builder_cfg = Config.from_file(builder_cfg_file)
    model_cfg = Config.from_file(model_cfg_file)
    tool_cfg = Config.from_file(tool_cfg_file)

    tools_info = builder_cfg.tools
    available_tool_list = []
    for key, value in tools_info.items():
        if value['use']:
            available_tool_list.append(key)
        tool_cfg[key]['use'] = value['use']

    openapi_plugin_file = get_user_openapi_plugin_cfg_file(uuid_str)
    plugin_cfg = {}
    available_plugin_list = []
    if os.path.exists(openapi_plugin_file):
        openapi_plugin_cfg = Config.from_file(openapi_plugin_file)
        try:
            config_dict = openapi_schema_convert(
                schema=openapi_plugin_cfg.schema,
                auth=openapi_plugin_cfg.auth.to_dict())
            plugin_cfg = Config(config_dict)
            for name, config in config_dict.items():
                available_plugin_list.append(name)
        except Exception as e:
            error = traceback.format_exc()
            print(f'Error:{e}, with detail: {error}')
            print(
                'Error:FormatError, with detail: The format of the plugin config file is incorrect.'
            )

    return builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list
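Aside: `get_user_cfg_file` derives a per-user path purely by string substitution: `config/` becomes `config/user/`, and `user` is then swapped for the uuid. A small sketch of the resulting paths (the uuid value is made up):

# Sketch of the substitutions performed by get_user_cfg_file above.
path = '/tmp/agentfabric/config/builder_config.json'
path = path.replace('config/', 'config/user/')
print(path)  # /tmp/agentfabric/config/user/builder_config.json
print(path.replace('user', 'abc123'))  # uuid 'abc123' is made up
# /tmp/agentfabric/config/abc123/builder_config.json
# Note: any other occurrence of 'user' in the base path would also be replaced.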
agentfabric/custom_prompt.py
ADDED
@@ -0,0 +1,303 @@
import copy
import os
import re

import json
from config_utils import get_user_cfg_file
from modelscope_agent.prompt.prompt import (KNOWLEDGE_INTRODUCTION_PROMPT,
                                            KNOWLEDGE_PROMPT, LengthConstraint,
                                            PromptGenerator, build_raw_prompt)

from modelscope.utils.config import Config

DEFAULT_SYSTEM_TEMPLATE = """

# 工具

## 你拥有如下工具:

<tool_list>

## 当你需要调用工具时,请在你的回复中穿插如下的工具调用命令,可以根据需求调用零次或多次:

工具调用
Action: 工具的名称,必须是<tool_name_list>之一
Action Input: 工具的输入
Observation: <result>工具返回的结果</result>
Answer: 根据Observation总结本次工具调用返回的结果,如果结果中出现url,请不要展示出。

```
[链接](url)
```

# 指令
"""

DEFAULT_SYSTEM_TEMPLATE_WITHOUT_TOOL = """

# 指令
"""

DEFAULT_INSTRUCTION_TEMPLATE = ''

DEFAULT_USER_TEMPLATE = """(你正在扮演<role_name>,你可以使用工具:<tool_name_list><knowledge_note>)<file_names><user_input>"""

DEFAULT_USER_TEMPLATE_WITHOUT_TOOL = """(你正在扮演<role_name><knowledge_note>) <file_names><user_input>"""

DEFAULT_EXEC_TEMPLATE = """Observation: <result><exec_result></result>\nAnswer:"""

TOOL_DESC = (
    '{name_for_model}: {name_for_human} API。 {description_for_model} 输入参数: {parameters}'
)


class CustomPromptGenerator(PromptGenerator):

    def __init__(self,
                 system_template=DEFAULT_SYSTEM_TEMPLATE,
                 instruction_template=DEFAULT_INSTRUCTION_TEMPLATE,
                 user_template=DEFAULT_USER_TEMPLATE,
                 exec_template=DEFAULT_EXEC_TEMPLATE,
                 assistant_template='',
                 sep='\n\n',
                 llm=None,
                 length_constraint=LengthConstraint(),
                 **kwargs):
        super().__init__(
            system_template=system_template,
            instruction_template=instruction_template,
            user_template=user_template,
            exec_template=exec_template,
            assistant_template=assistant_template,
            sep=sep,
            llm=llm,
            length_constraint=length_constraint)
        # hack here for special prompt, such as add an addition round before user input
        self.add_addition_round = kwargs.get('add_addition_round', False)
        self.addition_assistant_reply = kwargs.get('addition_assistant_reply',
                                                   '')
        builder_cfg_file = get_user_cfg_file(
            uuid_str=kwargs.get('uuid_str', ''))
        builder_cfg = Config.from_file(builder_cfg_file)
        self.builder_cfg = builder_cfg
        self.knowledge_file_name = kwargs.get('knowledge_file_name', '')

        self.llm = llm
        self.prompt_preprocessor = build_raw_prompt(llm.model_id)
        self.length_constraint = length_constraint
        self._parse_length_restriction()

    def _parse_length_restriction(self):
        constraint = self.llm.cfg.get('length_constraint', None)
        # if isinstance(constraint, Config):
        #     constraint = constraint.to_dict()
        self.length_constraint.update(constraint)

    def _update_user_prompt_without_knowledge(self, task, tool_list, **kwargs):
        if len(tool_list) > 0:
            # user input
            user_input = self.user_template.replace('<role_name>',
                                                    self.builder_cfg.name)
            user_input = user_input.replace(
                '<tool_name_list>',
                ','.join([tool.name for tool in tool_list]))
        else:
            self.user_template = DEFAULT_USER_TEMPLATE_WITHOUT_TOOL
            user_input = self.user_template.replace('<user_input>', task)
            user_input = user_input.replace('<role_name>',
                                            self.builder_cfg.name)

        user_input = user_input.replace('<user_input>', task)

        if 'append_files' in kwargs:
            append_files = kwargs.get('append_files', [])
            if len(append_files) > 0:
                file_names = ','.join(
                    [os.path.basename(path) for path in append_files])
                user_input = user_input.replace('<file_names>',
                                                f'[上传文件{file_names}]')
            else:
                user_input = user_input.replace('<file_names>', '')
        else:
            user_input = user_input.replace('<file_names>', '')

        return user_input

    def init_prompt(self, task, tool_list, knowledge_list, **kwargs):

        if len(self.history) == 0:

            self.history.append({
                'role': 'system',
                'content': 'You are a helpful assistant.'
            })

            if len(tool_list) > 0:
                prompt = f'{self.system_template}\n{self.instruction_template}'

                # get tool description str
                tool_str = self.get_tool_str(tool_list)
                prompt = prompt.replace('<tool_list>', tool_str)

                tool_name_str = self.get_tool_name_str(tool_list)
                prompt = prompt.replace('<tool_name_list>', tool_name_str)
            else:
                self.system_template = DEFAULT_SYSTEM_TEMPLATE_WITHOUT_TOOL
                prompt = f'{self.system_template}\n{self.instruction_template}'

            user_input = self._update_user_prompt_without_knowledge(
                task, tool_list, **kwargs)

            if len(knowledge_list) > 0:
                user_input = user_input.replace('<knowledge_note>',
                                                ',请查看前面的知识库')
            else:
                user_input = user_input.replace('<knowledge_note>', '')

            self.system_prompt = copy.deepcopy(prompt)

            # build history
            if self.add_addition_round:
                self.history.append({
                    'role': 'user',
                    'content': self.system_prompt
                })
                self.history.append({
                    'role': 'assistant',
                    'content': self.addition_assistant_reply
                })
                self.history.append({'role': 'user', 'content': user_input})
                self.history.append({
                    'role': 'assistant',
                    'content': self.assistant_template
                })
            else:
                self.history.append({
                    'role': 'user',
                    'content': self.system_prompt + user_input
                })
                self.history.append({
                    'role': 'assistant',
                    'content': self.assistant_template
                })

            self.function_calls = self.get_function_list(tool_list)
        else:
            user_input = self._update_user_prompt_without_knowledge(
                task, tool_list, **kwargs)
            if len(knowledge_list) > 0:
                user_input = user_input.replace('<knowledge_note>',
                                                ',请查看前面的知识库')
            else:
                user_input = user_input.replace('<knowledge_note>', '')

            self.history.append({'role': 'user', 'content': user_input})
            self.history.append({
                'role': 'assistant',
                'content': self.assistant_template
            })

        if len(knowledge_list) > 0:
            knowledge_str = self.get_knowledge_str(
                knowledge_list,
                file_name=self.knowledge_file_name,
                only_content=True)
            self.update_knowledge_str(knowledge_str)

    def update_knowledge_str(self, knowledge_str):
        """If knowledge base information was not used previously, it will be added;
        if knowledge base information was previously used, it will be replaced.

        Args:
            knowledge_str (str): knowledge str generated by get_knowledge_str
        """
        knowledge_introduction = KNOWLEDGE_INTRODUCTION_PROMPT.replace(
            '<file_name>', self.knowledge_file_name)
        if len(knowledge_str) > self.length_constraint.knowledge:
            # todo: use tokenizer to constrain length
            knowledge_str = knowledge_str[-self.length_constraint.knowledge:]
        knowledge_str = f'{KNOWLEDGE_PROMPT}{self.sep}{knowledge_introduction}{self.sep}{knowledge_str}'

        for i in range(0, len(self.history)):
            if self.history[i]['role'] == 'user':
                content: str = self.history[i]['content']
                start_pos = content.find(f'{KNOWLEDGE_PROMPT}{self.sep}')
                end_pos = content.rfind('\n\n# 工具\n\n')
                if start_pos >= 0 and end_pos >= 0:  # replace knowledge

                    self.history[i]['content'] = content[
                        0:start_pos] + knowledge_str + content[end_pos:]
                    break
                elif start_pos < 0 and end_pos == 0:  # add knowledge
                    self.history[i]['content'] = knowledge_str + content
                    break
                else:
                    continue

    def get_tool_str(self, tool_list):
        tool_texts = []
        for tool in tool_list:
            tool_texts.append(
                TOOL_DESC.format(
                    name_for_model=tool.name,
                    name_for_human=tool.name,
|
244 |
+
description_for_model=tool.description,
|
245 |
+
parameters=json.dumps(tool.parameters,
|
246 |
+
ensure_ascii=False)))
|
247 |
+
# + ' ' + FORMAT_DESC['json'])
|
248 |
+
tool_str = '\n\n'.join(tool_texts)
|
249 |
+
return tool_str
|
250 |
+
|
251 |
+
def get_tool_name_str(self, tool_list):
|
252 |
+
tool_name = []
|
253 |
+
for tool in tool_list:
|
254 |
+
tool_name.append(tool.name)
|
255 |
+
|
256 |
+
tool_name_str = json.dumps(tool_name, ensure_ascii=False)
|
257 |
+
return tool_name_str
|
258 |
+
|
259 |
+
def _generate(self, llm_result, exec_result: str):
|
260 |
+
"""
|
261 |
+
generate next round prompt based on previous llm_result and exec_result and update history
|
262 |
+
"""
|
263 |
+
if len(llm_result) != 0:
|
264 |
+
self.history[-1]['content'] += f'{llm_result}'
|
265 |
+
if len(exec_result) != 0:
|
266 |
+
# handle image markdown wrapper
|
267 |
+
image_markdown_re = re.compile(
|
268 |
+
pattern=r'!\[IMAGEGEN\]\(([\s\S]+)\)')
|
269 |
+
match = image_markdown_re.search(exec_result)
|
270 |
+
if match is not None:
|
271 |
+
exec_result = match.group(1).rstrip()
|
272 |
+
exec_result = self.exec_template.replace('<exec_result>',
|
273 |
+
str(exec_result))
|
274 |
+
self.history[-1]['content'] += exec_result
|
275 |
+
|
276 |
+
# generate plate prompt here
|
277 |
+
self.prompt = self.prompt_preprocessor(self.history)
|
278 |
+
return self.prompt
|
279 |
+
|
280 |
+
|
281 |
+
def parse_role_config(config: dict):
|
282 |
+
prompt = '你扮演AI-Agent,'
|
283 |
+
|
284 |
+
# concat prompt
|
285 |
+
if 'name' in config and config['name']:
|
286 |
+
prompt += ('你的名字是' + config['name'] + '。')
|
287 |
+
if 'description' in config and config['description']:
|
288 |
+
prompt += config['description']
|
289 |
+
prompt += '\n你具有下列具体功能:'
|
290 |
+
if 'instruction' in config and config['instruction']:
|
291 |
+
if isinstance(config['instruction'], list):
|
292 |
+
for ins in config['instruction']:
|
293 |
+
prompt += ins
|
294 |
+
prompt += ';'
|
295 |
+
elif isinstance(config['instruction'], str):
|
296 |
+
prompt += config['instruction']
|
297 |
+
if prompt[-1] == ';':
|
298 |
+
prompt = prompt[:-1]
|
299 |
+
prompt += '\n下面你将开始扮演'
|
300 |
+
if 'name' in config and config['name']:
|
301 |
+
prompt += config['name']
|
302 |
+
prompt += ',明白了请说“好的。”,不要说其他的。'
|
303 |
+
return prompt
|
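For reference, a quick sketch of what `parse_role_config` assembles; the config values below are made up for illustration, and the line breaks in the comment correspond to the `\n` characters in the returned string:

    cfg = {
        'name': 'TravelBot',
        'description': '一个旅行规划助手。',
        'instruction': ['制定行程', '估算预算'],
    }
    parse_role_config(cfg)
    # -> 你扮演AI-Agent,你的名字是TravelBot。一个旅行规划助手。
    #    你具有下列具体功能:制定行程;估算预算
    #    下面你将开始扮演TravelBot,明白了请说"好的。",不要说其他的。

Note that the trailing ';' appended after each instruction is stripped before the closing sentence is added.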
agentfabric/gradio_utils.py
ADDED
@@ -0,0 +1,410 @@
+from __future__ import annotations
+import base64
+import html
+import io
+import os
+import re
+from urllib import parse
+
+import json
+import markdown
+from gradio.components import Chatbot as ChatBotBase
+from modelscope_agent.output_parser import MRKLOutputParser
+from PIL import Image
+
+ALREADY_CONVERTED_MARK = '<!-- ALREADY CONVERTED BY PARSER. -->'
+
+
+# convert a local image path to a base64 data URL
+def covert_image_to_base64(image_path):
+    # get the file extension
+    ext = image_path.split('.')[-1]
+    if ext not in ['gif', 'jpeg', 'png']:
+        ext = 'jpeg'
+
+    with open(image_path, 'rb') as image_file:
+        # Read the file
+        encoded_string = base64.b64encode(image_file.read())
+
+        # Convert bytes to string
+        base64_data = encoded_string.decode('utf-8')
+
+        # Build the base64-encoded data URL
+        base64_url = f'data:image/{ext};base64,{base64_data}'
+        return base64_url
+
+
+def convert_url(text, new_filename):
+    # Define the pattern to search for
+    # This pattern captures the text inside the square brackets, the path, and the filename
+    pattern = r'!\[([^\]]+)\]\(([^)]+)\)'
+
+    # Define the replacement pattern
+    # \1 is a backreference to the text captured by the first group ([^\]]+)
+    replacement = rf'![\1]({new_filename})'
+
+    # Replace the pattern in the text with the replacement
+    return re.sub(pattern, replacement, text)
+
+
+def format_cover_html(configuration, bot_avatar_path):
+    if bot_avatar_path:
+        image_src = covert_image_to_base64(bot_avatar_path)
+    else:
+        image_src = '//img.alicdn.com/imgextra/i3/O1CN01YPqZFO1YNZerQfSBk_!!6000000003047-0-tps-225-225.jpg'
+    return f"""
+<div class="bot_cover">
+    <div class="bot_avatar">
+        <img src={image_src} />
+    </div>
+    <div class="bot_name">{configuration.get("name", "")}</div>
+    <div class="bot_desp">{configuration.get("description", "")}</div>
+</div>
+"""
+
+
+def format_goto_publish_html(label, zip_url, agent_user_params, disable=False):
+    if disable:
+        return f"""<div class="publish_link_container">
+        <a class="disabled">{label}</a>
+    </div>
+    """
+    else:
+        params = {'AGENT_URL': zip_url}
+        params.update(agent_user_params)
+        template = 'modelscope/agent_template'
+        params_str = json.dumps(params)
+        link_url = f'https://www.modelscope.cn/studios/fork?target={template}&overwriteEnv={parse.quote(params_str)}'
+        return f"""
+<div class="publish_link_container">
+    <a href="{link_url}" target="_blank">{label}</a>
+</div>
+"""
+
+
+class ChatBot(ChatBotBase):
+
+    def normalize_markdown(self, bot_message):
+        lines = bot_message.split('\n')
+        normalized_lines = []
+        inside_list = False
+
+        for i, line in enumerate(lines):
+            if re.match(r'^(\d+\.|-|\*|\+)\s', line.strip()):
+                if not inside_list and i > 0 and lines[i - 1].strip() != '':
+                    normalized_lines.append('')
+                inside_list = True
+                normalized_lines.append(line)
+            elif inside_list and line.strip() == '':
+                if i < len(lines) - 1 and not re.match(r'^(\d+\.|-|\*|\+)\s',
+                                                       lines[i + 1].strip()):
+                    normalized_lines.append(line)
+                continue
+            else:
+                inside_list = False
+                normalized_lines.append(line)
+
+        return '\n'.join(normalized_lines)
+
+    def convert_markdown(self, bot_message):
+        if bot_message.count('```') % 2 != 0:
+            bot_message += '\n```'
+
+        bot_message = self.normalize_markdown(bot_message)
+
+        result = markdown.markdown(
+            bot_message,
+            extensions=[
+                'toc', 'extra', 'tables', 'markdown_katex', 'codehilite',
+                'markdown_cjk_spacing.cjk_spacing', 'pymdownx.magiclink'
+            ],
+            extension_configs={
+                'markdown_katex': {
+                    'no_inline_svg': True,  # fix for WeasyPrint
+                    'insert_fonts_css': True,
+                },
+                'codehilite': {
+                    'linenums': False,
+                    'guess_lang': True
+                },
+                'mdx_truly_sane_lists': {
+                    'nested_indent': 2,
+                    'truly_sane': True,
+                }
+            })
+        result = ''.join(result)
+        return result
+
+    @staticmethod
+    def prompt_parse(message):
+        output = ''
+        if 'Thought' in message:
+            if 'Action' in message or 'Action Input:' in message:
+                re_pattern_thought = re.compile(
+                    pattern=r'([\s\S]+)Thought:([\s\S]+)Action:')
+
+                res = re_pattern_thought.search(message)
+
+                if res is None:
+                    re_pattern_thought_only = re.compile(
+                        pattern=r'Thought:([\s\S]+)Action:')
+                    res = re_pattern_thought_only.search(message)
+                    llm_result = ''
+                    # the fallback pattern has a single group holding the thought
+                    action_thought_result = res.group(1).strip()
+                else:
+                    llm_result = res.group(1).strip()
+                    action_thought_result = res.group(2).strip()
+
+                re_pattern_action = re.compile(
+                    pattern=
+                    r'Action:([\s\S]+)Action Input:([\s\S]+)<\|startofexec\|>')
+                res = re_pattern_action.search(message)
+                if res is None:
+                    action, action_parameters = MRKLOutputParser(
+                    ).parse_response(message)
+                else:
+                    action = res.group(1).strip()
+                    action_parameters = res.group(2)
+                action_result = json.dumps({
+                    'api_name': action,
+                    'parameters': action_parameters
+                })
+                output += f'{llm_result}\n{action_thought_result}\n<|startofthink|>\n{action_result}\n<|endofthink|>\n'
+            if '<|startofexec|>' in message:
+                re_pattern3 = re.compile(
+                    pattern=r'<\|startofexec\|>([\s\S]+)<\|endofexec\|>')
+                res3 = re_pattern3.search(message)
+                observation = res3.group(1).strip()
+                output += f'\n<|startofexec|>\n{observation}\n<|endofexec|>\n'
+            if 'Final Answer' in message:
+                re_pattern2 = re.compile(
+                    pattern=r'Thought:([\s\S]+)Final Answer:([\s\S]+)')
+                res2 = re_pattern2.search(message)
+                # final_thought_result = res2.group(1).strip()
+                final_answer_result = res2.group(2).strip()
+                output += f'{final_answer_result}\n'
+
+            if output == '':
+                return message
+            print(output)
+            return output
+        else:
+            return message
+
+    def convert_bot_message(self, bot_message):
+
+        bot_message = ChatBot.prompt_parse(bot_message)
+        # print('processed bot message----------')
+        # print(bot_message)
+        # print('processed bot message done')
+        start_pos = 0
+        result = ''
+        find_json_pattern = re.compile(r'{[\s\S]+}')
+        START_OF_THINK_TAG, END_OF_THINK_TAG = '<|startofthink|>', '<|endofthink|>'
+        START_OF_EXEC_TAG, END_OF_EXEC_TAG = '<|startofexec|>', '<|endofexec|>'
+        while start_pos < len(bot_message):
+            try:
+                start_of_think_pos = bot_message.index(START_OF_THINK_TAG,
+                                                       start_pos)
+                end_of_think_pos = bot_message.index(END_OF_THINK_TAG,
+                                                     start_pos)
+                if start_pos < start_of_think_pos:
+                    result += self.convert_markdown(
+                        bot_message[start_pos:start_of_think_pos])
+                think_content = bot_message[start_of_think_pos
+                                            + len(START_OF_THINK_TAG
+                                                  ):end_of_think_pos].strip()
+                json_content = find_json_pattern.search(think_content)
+                think_content = json_content.group(
+                ) if json_content else think_content
+                try:
+                    think_node = json.loads(think_content)
+                    plugin_name = think_node.get(
+                        'plugin_name',
+                        think_node.get('plugin',
+                                       think_node.get('api_name', 'unknown')))
+                    summary = f'选择插件【{plugin_name}】,调用处理中...'
+                    # drop the url field if present; a plain `del` raises KeyError when it is missing
+                    think_node.pop('url', None)
+
+                    detail = f'```json\n\n{json.dumps(think_node, indent=3, ensure_ascii=False)}\n\n```'
+                except Exception:
+                    summary = '思考中...'
+                    detail = think_content
+                    # traceback.print_exc()
+                    # detail += traceback.format_exc()
+                result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(
+                    detail) + '</details>'
+                # print(f'detail:{detail}')
+                start_pos = end_of_think_pos + len(END_OF_THINK_TAG)
+            except Exception:
+                # result += traceback.format_exc()
+                break
+                # continue
+
+            try:
+                start_of_exec_pos = bot_message.index(START_OF_EXEC_TAG,
+                                                      start_pos)
+                end_of_exec_pos = bot_message.index(END_OF_EXEC_TAG, start_pos)
+                if start_pos < start_of_exec_pos:
+                    # render any text between the current position and the exec tag
+                    # (the original sliced up to start_of_think_pos here, a copy/paste slip)
+                    result += self.convert_markdown(
+                        bot_message[start_pos:start_of_exec_pos])
+                exec_content = bot_message[start_of_exec_pos
+                                           + len(START_OF_EXEC_TAG
+                                                 ):end_of_exec_pos].strip()
+                summary = '完成插件调用.'
+                detail = f'```json\n\n{exec_content}\n\n```'
+
+                result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(
+                    detail) + '</details>'
+
+                start_pos = end_of_exec_pos + len(END_OF_EXEC_TAG)
+            except Exception:
+                # result += traceback.format_exc()
+                continue
+        if start_pos < len(bot_message):
+            result += self.convert_markdown(bot_message[start_pos:])
+        result += ALREADY_CONVERTED_MARK
+        return result
+
+    def convert_bot_message_for_qwen(self, bot_message):
+
+        start_pos = 0
+        result = ''
+        find_json_pattern = re.compile(r'{[\s\S]+}')
+        ACTION = 'Action:'
+        ACTION_INPUT = 'Action Input'
+        OBSERVATION = 'Observation'
+        RESULT_START = '<result>'
+        RESULT_END = '</result>'
+        while start_pos < len(bot_message):
+            try:
+                action_pos = bot_message.index(ACTION, start_pos)
+                action_input_pos = bot_message.index(ACTION_INPUT, start_pos)
+                result += self.convert_markdown(
+                    bot_message[start_pos:action_pos])
+                # Action: image_gen
+                # Action Input
+                # {"text": "金庸武侠 世界", "resolution": "1280x720"}
+                # Observation: <result>![IMAGEGEN](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/e9/20231116/723609ee/d046d2d9-0c95-420b-9467-f0e831f5e2b7-1.png?Expires=1700227460&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=R0PlEazQF9uBD%2Fh9tkzOkJMGyg8%3D)<result>  # noqa E501
+                action_name = bot_message[action_pos
+                                          + len(ACTION
+                                                ):action_input_pos].strip()
+                # the action input spans from 'Action Input' to 'Observation'
+                action_input_end = bot_message[action_input_pos:].index(
+                    OBSERVATION) - 1
+                action_input = bot_message[action_input_pos:action_input_pos
+                                           + action_input_end].strip()
+                is_json = find_json_pattern.search(action_input)
+                if is_json:
+                    action_input = is_json.group()
+                else:
+                    action_input = re.sub(r'^Action Input[:]?[\s]*', '',
+                                          action_input)
+
+                summary = f'调用工具 {action_name}'
+                if is_json:
+                    detail = f'```json\n\n{json.dumps(json.loads(action_input), indent=4, ensure_ascii=False)}\n\n```'
+                else:
+                    detail = action_input
+                result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(
+                    detail) + '</details>'
+                start_pos = action_input_pos + action_input_end + 1
+                try:
+                    observation_pos = bot_message.index(OBSERVATION, start_pos)
+                    idx = observation_pos + len(OBSERVATION)
+                    obs_message = bot_message[idx:]
+                    observation_start_id = obs_message.index(
+                        RESULT_START) + len(RESULT_START)
+                    observation_end_idx = obs_message.index(RESULT_END)
+                    summary = '完成调用'
+                    exec_content = obs_message[
+                        observation_start_id:observation_end_idx]
+                    detail = f'```\n\n{exec_content}\n\n```'
+                    start_pos = idx + observation_end_idx + len(RESULT_END)
+                except Exception:
+                    summary = '执行中...'
+                    detail = ''
+                    exec_content = None
+
+                result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(
+                    detail) + '</details>'
+                if exec_content is not None and '[IMAGEGEN]' in exec_content:
+                    # convert local file to base64
+                    re_pattern = re.compile(pattern=r'!\[[^\]]+\]\(([^)]+)\)')
+                    res = re_pattern.search(exec_content)
+                    if res:
+                        image_path = res.group(1).strip()
+                        if os.path.isfile(image_path):
+                            exec_content = convert_url(
+                                exec_content,
+                                covert_image_to_base64(image_path))
+                    result += self.convert_markdown(f'{exec_content}')
+
+            except Exception:
+                # import traceback; traceback.print_exc()
+                result += self.convert_markdown(bot_message[start_pos:])
+                start_pos = len(bot_message[start_pos:])
+                break
+
+        result += ALREADY_CONVERTED_MARK
+        return result
+
+    def postprocess(
+        self,
+        message_pairs: list[list[str | tuple[str] | tuple[str, str] | None]
+                            | tuple],
+    ) -> list[list[str | dict | None]]:
+        """
+        Parameters:
+            message_pairs: List of lists representing the message and response pairs.
+                Each message and response should be a string, which may be in Markdown format.
+                It can also be a tuple whose first element is a string or pathlib.Path
+                filepath or URL to an image/video/audio, and second (optional) element is the alt text,
+                in which case the media file is displayed. It can also be None, in which case that message is not displayed.
+        Returns:
+            List of lists representing the message and response. Each message and response will be a string of HTML,
+            or a dictionary with media information. Or None if the message is not to be displayed.
+        """
+        if message_pairs is None:
+            return []
+        processed_messages = []
+        for message_pair in message_pairs:
+            assert isinstance(
+                message_pair, (tuple, list)
+            ), f'Expected a list of lists or list of tuples. Received: {message_pair}'
+            assert (
+                len(message_pair) == 2
+            ), f'Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}'
+            if isinstance(message_pair[0], tuple) or isinstance(
+                    message_pair[1], tuple):
+                processed_messages.append([
+                    self._postprocess_chat_messages(message_pair[0]),
+                    self._postprocess_chat_messages(message_pair[1]),
+                ])
+            else:
+                # handle the case where the pair entries are not tuples
+                user_message, bot_message = message_pair
+
+                if user_message and not user_message.endswith(
+                        ALREADY_CONVERTED_MARK):
+                    convert_md = self.convert_markdown(
+                        html.escape(user_message))
+                    user_message = f'{convert_md}' + ALREADY_CONVERTED_MARK
+                if bot_message and not bot_message.endswith(
+                        ALREADY_CONVERTED_MARK):
+                    # bot_message = self.convert_bot_message(bot_message)
+                    bot_message = self.convert_bot_message_for_qwen(
+                        bot_message)
+                processed_messages.append([
+                    user_message,
+                    bot_message,
+                ])
+
+        return processed_messages
agentfabric/help_tools.py
ADDED
@@ -0,0 +1,170 @@
+import os
+from http import HTTPStatus
+
+import json
+import requests
+from config_utils import DEFAULT_BUILDER_CONFIG_DIR, get_user_cfg_file
+from dashscope import ImageSynthesis
+from modelscope_agent.tools import Tool
+
+from modelscope.utils.config import Config
+
+LOGO_NAME = 'custom_bot_avatar.png'
+LOGO_PATH = os.path.join(DEFAULT_BUILDER_CONFIG_DIR, LOGO_NAME)
+
+CONFIG_FORMAT = """
+{
+    "name": ... # CustomGPT的名字。
+    "description": ... # CustomGPT 的简介。
+    "instructions": ... # CustomGPT 的功能要求,类型是string。
+    "prompt_recommend": ... # CustomGPT 的起始交互语句,类型是一个字符串数组,起始为[]。
+}
+"""
+
+CONF_GENERATOR_INST = """你现在要扮演一个 CustomGPT 的配置生成器
+
+在接下来的对话中,每次均生成如下格式的内容:
+
+{config_format}
+
+现在,已知原始配置为{old_config},用户在原始配置上有一些建议修改项,包括:
+1. 用户建议的 CustomGPT 的名称为{app_name}
+2. CustomGPT 的描述为{app_description}
+3. CustomGPT 的启动器为{app_conversation_starter}
+
+请你参考原始配置生成新的修改后的配置,请注意:
+1. 如果用户对原本的简介、功能要求、交互语句不满意,则直接换掉原本的简介、功能要求、交互语句。
+2. 如果用户对原本的简介、功能要求、交互语句比较满意,参考用户的起始交互语句和原配置中的起始交互语句,生成新的简介、功能要求、交互语句。
+3. 如果原始配置没有实际内容,请你根据你的知识帮助用户生成第一个版本的配置,简介在100字左右,功能要求在150字左右,起始交互语句在4条左右。
+
+请你生成新的配置文件,严格遵循给定格式,请不要创造其它字段,仅输出要求的json格式,请勿输出其它内容。
+"""
+
+LOGO_INST = """定制化软件 CustomGPT 的作用是{description},{user_requirement}请你为它生成一个专业的logo"""
+
+
+def get_logo_path(uuid_str=''):
+    logo_path = os.getenv('LOGO_PATH', LOGO_PATH)
+    # convert from ./config/builder_config.json to ./config/user/builder_config.json
+    logo_path = logo_path.replace('config/', 'config/user/')
+
+    # convert from ./config/user to ./config/uuid
+    if uuid_str != '':
+        logo_path = logo_path.replace('user', uuid_str)
+    if not os.path.exists(logo_path):
+        os.makedirs(os.path.dirname(logo_path), exist_ok=True)
+    return logo_path
+
+
+def call_wanx(prompt, save_path):
+    rsp = ImageSynthesis.call(
+        model=ImageSynthesis.Models.wanx_v1,
+        prompt=prompt,
+        n=1,
+        size='1024*1024')
+    if rsp.status_code == HTTPStatus.OK:
+        if os.path.exists(save_path):
+            os.remove(save_path)
+
+        # save the generated file(s) to save_path
+        for result in rsp.output.results:
+            with open(save_path, 'wb+') as f:
+                f.write(requests.get(result.url).content)
+    else:
+        print('Failed, status_code: %s, code: %s, message: %s' %
+              (rsp.status_code, rsp.code, rsp.message))
+
+
+class LogoGeneratorTool(Tool):
+    description = 'logo_designer是一个AI绘制logo的服务,输入用户对 CustomGPT 的要求,会生成 CustomGPT 的logo。'
+    name = 'logo_designer'
+    parameters: list = [{
+        'name': 'user_requirement',
+        'description': '用户对 CustomGPT logo的要求和建议',
+        'required': True,
+        'schema': {
+            'type': 'string'
+        },
+    }]
+
+    def _remote_call(self, *args, **kwargs):
+        user_requirement = kwargs['user_requirement']
+        uuid_str = kwargs.get('uuid_str', '')
+        builder_cfg_file = get_user_cfg_file(uuid_str)
+        builder_cfg = Config.from_file(builder_cfg_file)
+
+        avatar_prompt = LOGO_INST.format(
+            description=builder_cfg.description,
+            user_requirement=user_requirement)
+        call_wanx(
+            prompt=avatar_prompt, save_path=get_logo_path(uuid_str=uuid_str))
+        builder_cfg.avatar = LOGO_NAME
+        return {'result': builder_cfg}
+
+
+def config_conversion(generated_config: dict, save=False, uuid_str=''):
+    """
+    convert
+    {
+        name: "铁人",
+        description: "我希望我的AI-Agent是一个专业的健身教练,专注于力量训练方面,可以提供相关的建议和指南。
+            它还可以帮我跟踪和记录每次的力量训练数据,以及提供相应的反馈和建议,帮助我不断改进和优化我的训练计划。
+            此外,我希望它可以拥有一些特殊技能和功能,让它更加实用和有趣。例如,它可以帮助我预测未来的身体状况、
+            分析我的营养摄入情况、提供心理支持等等。我相信,在它的帮助下,我可以更快地达到自己的目标,变得更加强壮和健康。",
+        instructions: [
+            "提供力量训练相关的建议和指南",
+            "跟踪和记录每次的力量训练数据",
+            "提供反馈和建议,帮助改进和优化训练计划",
+            "预测未来的身体状况",
+            "分析营养摄入情况",
+            "提供心理支持",
+        ],
+        prompt_recommend: [
+            "你好,今天的锻炼计划是什么呢?",
+            "你觉得哪种器械最适合练背部肌肉呢?",
+            "你觉得我现在的训练强度合适吗?",
+            "你觉得哪种食物最适合增肌呢?",
+        ],
+        logo_prompt: "设计一个肌肉男形象的Logo",
+    }
+    to
+    {
+        name: ...,                # unchanged
+        description: ...,         # unchanged
+        instructions: "提供力量训练相关的建议和指南;跟踪和记录每次的力量训练数据;提供反馈和建议,帮助改进和优化训练计划;
+            预测未来的身体状况;分析营养摄入情况;提供心理支持",  # list joined with ';'
+        prompt_recommend: [...],  # unchanged
+        tools: xxx
+        model: yyy
+    }
+    :param generated_config: config dict produced by the builder LLM
+    :return: the updated builder config
+    """
+    builder_cfg_file = get_user_cfg_file(uuid_str)
+    builder_cfg = Config.from_file(builder_cfg_file)
+    try:
+        builder_cfg.name = generated_config['name']
+        builder_cfg.description = generated_config['description']
+        builder_cfg.prompt_recommend = generated_config['prompt_recommend']
+        if isinstance(generated_config['instructions'], list):
+            builder_cfg.instruction = ';'.join(
+                generated_config['instructions'])
+        else:
+            builder_cfg.instruction = generated_config['instructions']
+        if save:
+            json.dump(
+                builder_cfg.to_dict(),
+                open(builder_cfg_file, 'w'),
+                indent=2,
+                ensure_ascii=False)
+        return builder_cfg
+    except KeyError as e:
+        # a missing field raises KeyError, not ValueError, so catch that here
+        raise ValueError(f'failed to save the configuration with info: {e}')
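A sketch of `config_conversion` on a hypothetical LLM output in the CONFIG_FORMAT shape; all values below are invented, and the call is commented out because it reads and writes the app's builder config files:

    generated = {
        'name': 'TravelBot',
        'description': 'A travel planning assistant.',
        'instructions': ['plan itineraries', 'estimate budgets'],
        'prompt_recommend': ['Where should I go in May?'],
    }
    # cfg = config_conversion(generated, save=True, uuid_str='demo-uuid')
    # cfg.instruction == 'plan itineraries;estimate budgets'

The instruction list is joined with ';' because the builder config stores instructions as a single string.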
agentfabric/i18n.py
ADDED
@@ -0,0 +1,57 @@
+support_lang = ['zh-cn', 'en']
+
+i18n = {
+    'create': ['创建', 'Create'],
+    'configure': ['配置', 'Configure'],
+    'send': ['发送', 'Send'],
+    'sendOnLoading': ['发送(Agent 加载中...)', 'Send (Agent Loading...)'],
+    'upload_btn': ['上传文件', 'Upload File'],
+    'message': ['输入', 'Send a message'],
+    'message_placeholder': ['输入你的消息', 'Type your message here'],
+    'prompt_suggestion': ['推荐提示词', 'Prompt Suggestions'],
+    'form_avatar': ['头像', 'Avatar'],
+    'form_name': ['名称', 'Name'],
+    'form_name_placeholder': ['为你的 agent 取一个名字', 'Name your agent'],
+    'form_description': ['描述', 'Description'],
+    'form_description_placeholder': [
+        '为你的 agent 添加一段简短的描述',
+        'Add a short description about what this agent does'
+    ],
+    'form_instructions': ['指令', 'Instructions'],
+    'form_instructions_placeholder': [
+        '你的 agent 需要处理哪些事情',
+        'What does this agent do? How does it behave? What should it avoid doing?'
+    ],
+    'form_model': ['模型', 'Model'],
+    'form_prompt_suggestion':
+    ['推荐提示词,双击行可修改', 'prompt suggestion, double click a row to modify'],
+    'form_knowledge': ['知识库', 'Knowledge Base'],
+    'form_capabilities': ['内置能力', 'Capabilities'],
+    'form_update_button': ['更新配置', 'Update Configuration'],
+    'open_api_accordion': ['OpenAPI 配置', 'OpenAPI Configuration'],
+    'preview': ['预览', 'Preview'],
+    'build': ['构建', 'Build'],
+    'publish': ['发布', 'Publish'],
+    'build_hint': ['点击"构建"完成构建', 'Click "Build" to finish building'],
+    'publish_hint': [
+        '点击"发布"跳转创空间完成 Agent 发布',
+        'Click "Publish" to jump to the space to finish agent publishing'
+    ],
+    'header': [
+        '<span style="font-size: 20px; font-weight: 500;">\N{fire} AgentFabric -- 由 Modelscope-agent 驱动 </span> [github 点赞](https://github.com/modelscope/modelscope-agent/tree/main)',  # noqa E501
+        '<span style="font-size: 20px; font-weight: 500;">\N{fire} AgentFabric powered by Modelscope-agent </span> [github star](https://github.com/modelscope/modelscope-agent/tree/main)'  # noqa E501
+    ],
+}
+
+
+class I18n():
+
+    def __init__(self, lang):
+        self.lang = lang
+        self.langIndex = support_lang.index(lang)
+
+    def get(self, field):
+        return i18n.get(field)[self.langIndex]
+
+    def get_whole(self, field):
+        return f'{i18n.get(field)[0]}({i18n.get(field)[1]})'
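Usage is a plain table lookup keyed by the language index:

    labels = I18n('en')
    labels.get('create')        # -> 'Create'
    labels.get_whole('create')  # -> '创建(Create)'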
agentfabric/modelscope_agent/__init__.py
ADDED
File without changes
agentfabric/modelscope_agent/agent.py
ADDED
@@ -0,0 +1,334 @@
+import importlib
+import traceback
+from copy import deepcopy
+from typing import Dict, List, Optional, Union
+
+from .agent_types import AgentType
+from .llm import LLM
+from .output_parser import OutputParser, get_output_parser
+from .output_wrapper import display
+from .prompt import PromptGenerator, get_prompt_generator
+from .retrieve import KnowledgeRetrieval, ToolRetrieval
+from .tools import TOOL_INFO_LIST
+
+
+class AgentExecutor:
+
+    def __init__(self,
+                 llm: LLM,
+                 tool_cfg: Optional[Dict] = {},
+                 agent_type: AgentType = AgentType.DEFAULT,
+                 additional_tool_list: Optional[Dict] = {},
+                 prompt_generator: Optional[PromptGenerator] = None,
+                 output_parser: Optional[OutputParser] = None,
+                 tool_retrieval: Optional[Union[bool, ToolRetrieval]] = True,
+                 knowledge_retrieval: Optional[KnowledgeRetrieval] = None):
+        """
+        The core class of the ModelScope agent. It is responsible for the interaction
+        between user, llm and tools, and returns the execution result to the user.
+
+        Args:
+            llm (LLM): llm model, can be loaded from local or a remote server.
+            tool_cfg (Optional[Dict]): cfg of default tools
+            agent_type (AgentType, optional): agent type. Defaults to AgentType.DEFAULT; decides which type of
+                agent reasoning to use
+            additional_tool_list (Optional[Dict], optional): user-defined additional tool list. Defaults to {}.
+            prompt_generator (Optional[PromptGenerator], optional): this module is responsible for generating prompt
+                according to interaction result. Defaults to use MSPromptGenerator.
+            output_parser (Optional[OutputParser], optional): this module is responsible for parsing output of llm
+                to executable actions. Defaults to use MsOutputParser.
+            tool_retrieval (Optional[Union[bool, ToolRetrieval]], optional): Retrieve related tools by input task,
+                since most of the tools may be useless for the LLM in a specific task.
+                If it is bool type and is True, will use default tool_retrieval. Defaults to True.
+            knowledge_retrieval (Optional[KnowledgeRetrieval], optional): If the user wants to use extra knowledge,
+                this component can be used to retrieve related knowledge. Defaults to None.
+        """
+
+        self.llm = llm
+
+        self.agent_type = agent_type
+        self.llm.set_agent_type(agent_type)
+        self.prompt_generator = prompt_generator or get_prompt_generator(
+            agent_type)
+        self.output_parser = output_parser or get_output_parser(agent_type)
+
+        self._init_tools(tool_cfg, additional_tool_list)
+
+        if isinstance(tool_retrieval, bool) and tool_retrieval:
+            tool_retrieval = ToolRetrieval()
+        self.tool_retrieval = tool_retrieval
+        if self.tool_retrieval:
+            self.tool_retrieval.construct(
+                [str(t) for t in self.tool_list.values()])
+        self.knowledge_retrieval = knowledge_retrieval
+        self.reset()
+        self.seed = None
+
+    def _init_tools(self,
+                    tool_cfg: Dict = {},
+                    additional_tool_list: Dict = {}):
+        """init tool list of agent. We provide a default tool list, which is initialized by a cfg file.
+        Users can also provide user-defined tools via additional_tool_list.
+        The key of additional_tool_list is the tool name, and the value is the corresponding object.
+
+        Args:
+            tool_cfg (Dict): default tool cfg.
+            additional_tool_list (Dict, optional): user-defined tools. Defaults to {}.
+        """
+        self.tool_list = {}
+        tool_info_list = {**TOOL_INFO_LIST, **additional_tool_list}
+        tools_module = importlib.import_module('modelscope_agent.tools')
+        for tool_name in tool_cfg.keys():
+            if tool_cfg[tool_name].get('use', False):
+                assert tool_name in tool_info_list, f'Invalid tool name: {tool_name}, ' \
+                    f'available ones are: {tool_info_list.keys()}'
+                tool_class_name = tool_info_list[tool_name]
+                tool_class = getattr(tools_module, tool_class_name)
+                tool_name = tool_class.name
+                self.tool_list[tool_name] = tool_class(tool_cfg)
+
+        self.tool_list = {**self.tool_list, **additional_tool_list}
+        # self.available_tool_list = deepcopy(self.tool_list)
+        self.set_available_tools(self.tool_list.keys())
+
+    def set_available_tools(self, available_tool_list):
+        # TODO @wenmeng.zwm refine tool init
+        for t in available_tool_list:
+            if t not in self.tool_list:
+                raise ValueError(
+                    f'Unsupported tools found:{t}, please check, valid ones: {self.tool_list.keys()}'
+                )
+
+        self.available_tool_list = {
+            k: self.tool_list[k]
+            for k in available_tool_list
+        }
+
+    def retrieve_tools(self, query: str) -> List[str]:
+        """retrieve tools given query
+
+        Args:
+            query (str): query
+
+        """
+        if self.tool_retrieval:
+            retrieve_tools = self.tool_retrieval.retrieve(query)
+            self.set_available_tools(available_tool_list=retrieve_tools.keys())
+        return self.available_tool_list.values()
+
+    def get_knowledge(self, query: str) -> List[str]:
+        """retrieve knowledge given query
+
+        Args:
+            query (str): query
+
+        """
+        return self.knowledge_retrieval.retrieve(
+            query) if self.knowledge_retrieval else []
+
+    def run(self,
+            task: str,
+            remote: bool = False,
+            print_info: bool = False,
+            append_files: list = []) -> List[Dict]:
+        """use llm and tools to execute the task given by the user
+
+        Args:
+            task (str): concrete task
+            remote (bool, optional): whether to execute tool in remote mode. Defaults to False.
+            print_info (bool, optional): whether to print prompt info. Defaults to False.
+
+        Returns:
+            List[Dict]: execute result. One task may need to interact with llm multiple times,
+                so a list of dict is returned. Each dict contains the result of one interaction.
+        """
+
+        # retrieve tools
+        tool_list = self.retrieve_tools(task)
+        knowledge_list = self.get_knowledge(task)
+
+        self.prompt_generator.init_prompt(
+            task, tool_list, knowledge_list, append_files=append_files)
+        function_list = self.prompt_generator.get_function_list(tool_list)
+
+        llm_result, exec_result = '', ''
+
+        idx = 0
+        final_res = []
+
+        while True:
+            idx += 1
+
+            # generate prompt and call llm
+            llm_artifacts = self.prompt_generator.generate(
+                llm_result, exec_result)
+            try:
+                llm_result = self.llm.generate(llm_artifacts, function_list)
+            except RuntimeError as e:
+                return [{'exec_result': str(e)}]
+
+            if print_info:
+                print(f'|LLM inputs in round {idx}: {llm_artifacts}')
+
+            # parse and get tool name and arguments
+            try:
+                action, action_args = self.output_parser.parse_response(
+                    llm_result)
+            except ValueError as e:
+                return [{'exec_result': f'{e}'}]
+
+            if action is None:
+                # in chat mode, the final result of the last instruction should be updated to prompt history
+                _ = self.prompt_generator.generate(llm_result, '')
+
+                # for summarize
+                display(llm_result, {}, idx, self.agent_type)
+                return final_res
+
+            if action in self.available_tool_list:
+                action_args = self.parse_action_args(action_args)
+                tool = self.tool_list[action]
+
+                # TODO @wenmeng.zwm remove this hack logic for image generation
+                if action == 'image_gen' and self.seed:
+                    action_args['seed'] = self.seed
+                try:
+                    exec_result = tool(**action_args, remote=remote)
+                    if print_info:
+                        print(f'|exec_result: {exec_result}')
+
+                    # parse exec result and store result to agent state
+                    final_res.append(exec_result)
+                    self.parse_exec_result(exec_result)
+                except Exception as e:
+                    exec_result = f'Action call error: {action}: {action_args}. \n Error message: {e}'
+                    return [{'exec_result': exec_result}]
+            else:
+                exec_result = f"Unknown action: '{action}'. "
+                return [{'exec_result': exec_result}]
+
+            # display result
+            display(llm_result, exec_result, idx, self.agent_type)
+
+    def stream_run(self,
+                   task: str,
+                   remote: bool = True,
+                   print_info: bool = False,
+                   append_files: list = []) -> Dict:
+        """this is a stream version of run, which can be used in scenarios like gradio.
+        It will yield the result of each interaction, so that the caller can display the result
+
+        Args:
+            task (str): concrete task
+            remote (bool, optional): whether to execute tool in remote mode. Defaults to True.
+            print_info (bool, optional): whether to print prompt info. Defaults to False.
+            append_files (list, optional): files used only in this run; there is no need to
+                record them to the global state.
+
+        Yields:
+            Iterator[Dict]: iterator of llm response and tool execution result
+        """
+
+        # retrieve tools
+        tool_list = self.retrieve_tools(task)
+        knowledge_list = self.get_knowledge(task)
+
+        self.prompt_generator.init_prompt(
+            task,
+            tool_list,
+            knowledge_list,
+            append_files=append_files,
+        )
+        function_list = self.prompt_generator.get_function_list(tool_list)
+
+        llm_result, exec_result = '', ''
+
+        idx = 0
+
+        while True:
+            idx += 1
+            llm_artifacts = self.prompt_generator.generate(
+                llm_result, exec_result)
+            if print_info:
+                print(f'|LLM inputs in round {idx}:\n{llm_artifacts}')
+
+            llm_result = ''
+            try:
+                for s in self.llm.stream_generate(llm_artifacts,
+                                                  function_list):
+                    llm_result += s
+                    yield {'llm_text': s}
+            except RuntimeError:
+                s = self.llm.generate(llm_artifacts)
+                llm_result += s
+                yield {'llm_text': s}
+            except Exception as e:
+                yield {'llm_text': str(e)}
+
+            # parse and get tool name and arguments
+            try:
+                action, action_args = self.output_parser.parse_response(
+                    llm_result)
+            except ValueError as e:
+                yield {'exec_result': f'{e}'}
+                return
+
+            if action is None:
+                # in chat mode, the final result of the last instruction should be updated to prompt history
+                _ = self.prompt_generator.generate(llm_result, '')
+                yield {'is_final': True}
+                return
+
+            if action in self.available_tool_list:
+                # yield 'Observation: ' as the end-of-action-input marker as soon as possible
+                yield {'llm_text': 'Observation: '}
+                action_args = self.parse_action_args(action_args)
+                tool = self.tool_list[action]
+
+                # TODO @wenmeng.zwm remove this hack logic for image generation
+                if action == 'image_gen' and self.seed:
+                    action_args['seed'] = self.seed
+                try:
+                    exec_result = tool(**action_args, remote=remote)
+                    yield {'exec_result': exec_result}
+
+                    # parse exec result and update state
+                    self.parse_exec_result(exec_result)
+                except Exception as e:
+                    exec_result = f'Action call error: {action}: {action_args}. \n Error message: {e}'
+                    yield {'exec_result': exec_result}
+                    self.prompt_generator.reset()
+                    return
+            else:
+                exec_result = f"Unknown action: '{action}'. "
+                yield {'exec_result': exec_result}
+                self.prompt_generator.reset()
+                return
+
+    def reset(self):
+        """
+        clear history and agent state
+        """
+        self.prompt_generator.reset()
+        self.agent_state = {}
+
+    def parse_action_args(self, action_args):
+        """
+        replace action_args in str with Image/Video/Audio wrappers, so that tools can handle them
+        """
+        parsed_action_args = {}
+        for name, arg in action_args.items():
+            try:
+                true_arg = self.agent_state.get(arg, arg)
+            except Exception as e:
+                print(f'Error when parsing action args: {e}, using fallback')
+                true_arg = arg
+            parsed_action_args[name] = true_arg
+        return parsed_action_args
+
+    def parse_exec_result(self, exec_result, *args, **kwargs):
+        """
+        update exec result to agent state.
+        key is the str representation of the result.
+        """
+        for k, v in exec_result.items():
+            self.agent_state[str(v)] = v
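A minimal sketch of driving the executor; the LLM instance and tool config are assumed to come from the surrounding app, and the tool name is illustrative:

    # agent = AgentExecutor(llm, tool_cfg={'image_gen': {'use': True}},
    #                       agent_type=AgentType.MRKL)
    # for frame in agent.stream_run('画一只猫', remote=True):
    #     if 'llm_text' in frame:
    #         print(frame['llm_text'], end='')   # streamed model text
    #     elif 'exec_result' in frame:
    #         print(frame['exec_result'])        # tool output

`stream_run` yields `llm_text` frames while the model is generating and an `exec_result` frame after each tool call, which is exactly what the Gradio UI consumes.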
agentfabric/modelscope_agent/agent_types.py
ADDED
@@ -0,0 +1,20 @@
+from enum import Enum
+
+
+class AgentType(str, Enum):
+
+    DEFAULT = 'default'
+    """The default agent type."""
+
+    MS_AGENT = 'ms-agent'
+    """An agent that uses the ModelScope-agent specific format and does a reasoning step before acting."""
+
+    MRKL = 'mrkl'
+    """An agent that does a reasoning step before acting, with MRKL."""
+
+    REACT = 'react'
+    """An agent that does a reasoning step before acting, with ReAct."""
+
+    Messages = 'messages'
+    """An agent optimized for using OpenAI functions."""
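Because `AgentType` subclasses `str`, config strings map straight onto members and members compare equal to their values:

    assert AgentType('mrkl') is AgentType.MRKL
    assert AgentType.MRKL == 'mrkl'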
agentfabric/modelscope_agent/llm/__init__.py
ADDED
@@ -0,0 +1,2 @@
+from .base import LLM
+from .llm_factory import LLMFactory
agentfabric/modelscope_agent/llm/base.py
ADDED
@@ -0,0 +1,64 @@
+from abc import abstractmethod
+from typing import List
+
+import json
+
+
+class LLM:
+    name = ''
+
+    def __init__(self, cfg):
+        self.cfg = cfg
+        self.agent_type = None
+        self.model = None
+        self.model_id = self.model
+
+    def set_agent_type(self, agent_type):
+        self.agent_type = agent_type
+
+    @abstractmethod
+    def generate(self, prompt: str, functions: list = [], **kwargs) -> str:
+        """each llm should implement this function to generate a response
+
+        Args:
+            prompt (str): prompt
+            functions (list): list of function objects including: name, description, parameters
+        Returns:
+            str: response
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def stream_generate(self,
+                        prompt: str,
+                        functions: list = [],
+                        **kwargs) -> str:
+        """stream generate response, which yields a generator of response in each step
+
+        Args:
+            prompt (str): prompt
+            functions (list): list of function objects including: name, description, parameters
+        Yields:
+            Iterator[str]: iterator of step response
+        """
+        raise NotImplementedError
+
+    def tokenize(self, input_text: str) -> List[int]:
+        """tokenize is used to calculate the length of the text to meet the model's input length requirements
+
+        Args:
+            input_text (str): input text
+        Returns:
+            list[int]: token_ids
+        """
+        raise NotImplementedError
+
+    def detokenize(self, input_ids: List[int]) -> str:
+        """detokenize
+
+        Args:
+            input_ids (list[int]): input token_ids
+        Returns:
+            str: text
+        """
+        raise NotImplementedError
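A minimal echo backend showing the contract the concrete subclasses below fill in; `EchoLLM` is a made-up name for illustration, not part of the codebase:

    class EchoLLM(LLM):
        name = 'echo_llm'

        def __init__(self, cfg={}):
            super().__init__(cfg)
            self.model = 'echo'
            self.model_id = self.model

        def generate(self, prompt, functions=[], **kwargs):
            # a real backend would call its serving endpoint here
            return f'echo: {prompt}'

        def stream_generate(self, prompt, functions=[], **kwargs):
            # yield the response piece by piece, as AgentExecutor.stream_run expects
            for chunk in ('echo: ', prompt):
                yield chunk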
agentfabric/modelscope_agent/llm/custom_llm.py
ADDED
@@ -0,0 +1,97 @@
+import os
+
+import json
+import requests
+from modelscope_agent.agent_types import AgentType
+
+from .base import LLM
+from .utils import DEFAULT_MESSAGE
+
+
+class CustomLLM(LLM):
+    '''
+    This class is for services that provide llm serving over http.
+    Users can override the result-parsing method if needed.
+    Put all the necessary information in env variables, such as Token, Model, URL.
+    '''
+    name = 'custom_llm'
+
+    def __init__(self, cfg):
+        super().__init__(cfg)
+        self.token = os.getenv('HTTP_LLM_TOKEN', None)
+        self.model = os.getenv('HTTP_LLM_MODEL', None)
+        self.model_id = self.model
+        self.url = os.getenv('HTTP_LLM_URL', None)
+
+        if self.token is None:
+            raise ValueError('HTTP_LLM_TOKEN is not set')
+        self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT)
+
+    def http_request(self, data):
+        headers = {
+            'Content-Type': 'application/json',
+            'Authorization': f'Bearer {self.token}'
+        }
+        response = requests.post(self.url, json=data, headers=headers)
+        return json.loads(response.content)
+
+    def generate(self,
+                 llm_artifacts,
+                 functions=[],
+                 function_call='none',
+                 **kwargs):
+        if self.agent_type != AgentType.Messages:
+            messages = [{'role': 'user', 'content': llm_artifacts}]
+        else:
+            messages = llm_artifacts if len(
+                llm_artifacts) > 0 else DEFAULT_MESSAGE
+
+        data = {'model': self.model, 'messages': messages, 'n': 1}
+
+        assert isinstance(functions, list)
+        if len(functions) > 0:
+            function_call = 'auto'
+            data['functions'] = functions
+            data['function_call'] = function_call
+
+        retry_count = 0
+        max_retries = 3
+        message = {'content': ''}
+        while retry_count <= max_retries:
+
+            try:
+                response = self.http_request(data)
+            except Exception as e:
+                retry_count += 1
+                if retry_count > max_retries:
+                    import traceback
+                    traceback.print_exc()
+                    print(f'input: {messages}, original error: {str(e)}')
+                    raise e
+                # retry; `response` is not defined on this path
+                continue
+
+            if response['code'] == 200:
+                message = response['data']['response'][0]
+                break
+            else:
+                retry_count += 1
+                if retry_count > max_retries:
+                    print('maximum retry reached, return default message')
+
+        # truncate content
+        content = message['content']
+
+        if self.agent_type == AgentType.MS_AGENT:
+            idx = content.find('<|endofthink|>')
+            if idx != -1:
+                content = content[:idx + len('<|endofthink|>')]
+            return content
+        elif self.agent_type == AgentType.Messages:
+            new_message = {
+                'content': content,
+                'role': message.get('response_role', 'assistant')
+            }
+            if 'function_call' in message and message['function_call'] != {}:
+                new_message['function_call'] = message.get('function_call')
+            return new_message
+        else:
+            return content
agentfabric/modelscope_agent/llm/dashscope_llm.py
ADDED
@@ -0,0 +1,125 @@
+import os
+import random
+import traceback
+from http import HTTPStatus
+from typing import Union
+
+import dashscope
+import json
+from dashscope import Generation
+from modelscope_agent.agent_types import AgentType
+
+from .base import LLM
+from .utils import DEFAULT_MESSAGE, CustomOutputWrapper
+
+dashscope.api_key = os.getenv('DASHSCOPE_API_KEY')
+
+
+class DashScopeLLM(LLM):
+    name = 'dashscope_llm'
+
+    def __init__(self, cfg):
+        super().__init__(cfg)
+        self.model = self.cfg.get('model', 'modelscope-agent-llm-v1')
+        self.model_id = self.model
+        self.generate_cfg = self.cfg.get('generate_cfg', {})
+        self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT)
+
+    def generate(self,
+                 llm_artifacts: Union[str, dict],
+                 functions=[],
+                 **kwargs):
+
+        # TODO retry and handle message
+        try:
+            if self.agent_type == AgentType.Messages:
+                messages = llm_artifacts if len(
+                    llm_artifacts) > 0 else DEFAULT_MESSAGE
+                self.generate_cfg['use_raw_prompt'] = False
+                response = dashscope.Generation.call(
+                    model=self.model,
+                    messages=messages,
+                    # set the random seed, optional, defaults to 1234 if not set
+                    seed=random.randint(1, 10000),
+                    result_format='message',  # set the result to "message" format
+                    stream=False,
+                    **self.generate_cfg)
+                llm_result = CustomOutputWrapper.handle_message_chat_completion(
+                    response)
+            else:
+                response = Generation.call(
+                    model=self.model,
+                    prompt=llm_artifacts,
+                    stream=False,
+                    **self.generate_cfg)
+                llm_result = CustomOutputWrapper.handle_message_text_completion(
+                    response)
+        except Exception as e:
+            error = traceback.format_exc()
+            error_msg = f'LLM error with input {llm_artifacts} \n dashscope error: {str(e)} with traceback {error}'
+            print(error_msg)
+            raise RuntimeError(error)
+
+        if self.agent_type == AgentType.MS_AGENT:
+            # in the form of text
+            idx = llm_result.find('<|endofthink|>')
+            if idx != -1:
+                llm_result = llm_result[:idx + len('<|endofthink|>')]
+            return llm_result
+        elif self.agent_type == AgentType.Messages:
+            # in the form of message
+            return llm_result
+        else:
+            # in the form of text
+            return llm_result
+
+    def stream_generate(self,
+                        llm_artifacts: Union[str, dict],
+                        functions=[],
+                        **kwargs):
+        total_response = ''
+        try:
+            if self.agent_type == AgentType.Messages:
+                self.generate_cfg['use_raw_prompt'] = False
+                responses = Generation.call(
+                    model=self.model,
+                    messages=llm_artifacts,
+                    stream=True,
+                    result_format='message',
+                    **self.generate_cfg)
+            else:
+                responses = Generation.call(
+                    model=self.model,
+                    prompt=llm_artifacts,
+                    stream=True,
+                    **self.generate_cfg)
+        except Exception as e:
+            error = traceback.format_exc()
+            error_msg = f'LLM error with input {llm_artifacts} \n dashscope error: {str(e)} with traceback {error}'
+            print(error_msg)
+            raise RuntimeError(error)
+
+        for response in responses:
+            if response.status_code == HTTPStatus.OK:
+                if self.agent_type == AgentType.Messages:
+                    llm_result = CustomOutputWrapper.handle_message_chat_completion(
+                        response)
+                    frame_text = llm_result['content'][len(total_response):]
+                else:
+                    llm_result = CustomOutputWrapper.handle_message_text_completion(
+                        response)
+                    frame_text = llm_result[len(total_response):]
+                yield frame_text
+
+                if self.agent_type == AgentType.Messages:
+                    total_response = llm_result['content']
+                else:
+                    total_response = llm_result
+            else:
+                err_msg = 'Error Request id: %s, Code: %d, status: %s, message: %s' % (
+                    response.request_id, response.status_code, response.code,
+                    response.message)
+                print(err_msg)
|
125 |
+
raise RuntimeError(err_msg)
|
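A usage sketch for the streaming path, assuming the dashscope SDK is installed and DASHSCOPE_API_KEY is set; the model name and prompt are illustrative:

from modelscope_agent.llm.dashscope_llm import DashScopeLLM

llm = DashScopeLLM({'model': 'qwen-max'})  # agent_type defaults to AgentType.DEFAULT
for frame in llm.stream_generate('写一句关于秋天的诗'):
    # each frame is only the newly generated suffix, since the SDK returns
    # cumulative text and stream_generate slices off what was already yielded
    print(frame, end='', flush=True)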
agentfabric/modelscope_agent/llm/llm_factory.py
ADDED
@@ -0,0 +1,28 @@
+def get_llm_cls(llm_type, model_name):
+    if llm_type == 'dashscope':
+        from .dashscope_llm import DashScopeLLM
+        return DashScopeLLM
+    elif llm_type == 'custom_llm':
+        from .custom_llm import CustomLLM
+        return CustomLLM
+    elif llm_type == 'openai':
+        from .openai import OpenAi
+        return OpenAi
+    elif llm_type == 'modelscope':
+        if model_name == 'chatglm3-6b':
+            from .modelscope_llm import ModelScopeChatGLM
+            return ModelScopeChatGLM
+        from .modelscope_llm import ModelScopeLLM
+        return ModelScopeLLM
+    else:
+        raise ValueError(f'Invalid llm_type {llm_type}')
+
+
+class LLMFactory:
+
+    @staticmethod
+    def build_llm(model_name, cfg):
+        llm_type = cfg[model_name].pop('type')
+        llm_cls = get_llm_cls(llm_type, model_name)
+        llm_cfg = cfg[model_name]
+        return llm_cls(cfg=llm_cfg)
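A minimal sketch of how build_llm is driven by a config dict (the model name and settings below are illustrative). Note that pop('type') mutates the passed-in config, so the same dict cannot be reused for a second build without re-adding the key:

from modelscope_agent.llm.llm_factory import LLMFactory

model_cfg = {
    'qwen-max': {                       # illustrative model name
        'type': 'dashscope',            # routes to DashScopeLLM in get_llm_cls
        'model': 'qwen-max',
        'generate_cfg': {'top_p': 0.8},
    }
}
llm = LLMFactory.build_llm('qwen-max', model_cfg)
# model_cfg['qwen-max'] no longer contains 'type' at this point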
agentfabric/modelscope_agent/llm/modelscope_llm.py
ADDED
@@ -0,0 +1,132 @@
+import os
+import sys
+
+import torch
+from modelscope_agent.agent_types import AgentType
+from swift import Swift
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+
+from modelscope import GenerationConfig, snapshot_download
+from .base import LLM
+
+
+class ModelScopeLLM(LLM):
+
+    def __init__(self, cfg):
+        super().__init__(cfg)
+
+        model_id = self.cfg.get('model_id', '')
+        self.model_id = model_id
+        model_revision = self.cfg.get('model_revision', None)
+        cache_dir = self.cfg.get('cache_dir', None)
+
+        if not os.path.exists(model_id):
+            model_dir = snapshot_download(
+                model_id, model_revision, cache_dir=cache_dir)
+        else:
+            model_dir = model_id
+        self.model_dir = model_dir
+        sys.path.append(self.model_dir)
+
+        self.model_cls = self.cfg.get('model_cls', AutoModelForCausalLM)
+        self.tokenizer_cls = self.cfg.get('tokenizer_cls', AutoTokenizer)
+
+        self.device_map = self.cfg.get('device_map', 'auto')
+        self.generation_cfg = GenerationConfig(
+            **self.cfg.get('generate_cfg', {}))
+
+        self.use_lora = self.cfg.get('use_lora', False)
+        self.lora_ckpt_dir = self.cfg.get('lora_ckpt_dir',
+                                          None) if self.use_lora else None
+
+        self.custom_chat = self.cfg.get('custom_chat', False)
+
+        self.end_token = self.cfg.get('end_token', '<|endofthink|>')
+        self.include_end = self.cfg.get('include_end', True)
+
+        self.setup()
+        self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT)
+
+    def setup(self):
+        model_cls = self.model_cls
+        tokenizer_cls = self.tokenizer_cls
+
+        self.model = model_cls.from_pretrained(
+            self.model_dir,
+            device_map=self.device_map,
+            # device='cuda:0',
+            torch_dtype=torch.float16,
+            trust_remote_code=True)
+        self.tokenizer = tokenizer_cls.from_pretrained(
+            self.model_dir, trust_remote_code=True)
+        self.model = self.model.eval()
+
+        if self.use_lora:
+            self.load_from_lora()
+
+        if self.cfg.get('use_raw_generation_config', False):
+            self.model.generation_config = GenerationConfig.from_pretrained(
+                self.model_dir, trust_remote_code=True)
+
+    def generate(self, prompt, functions=[], **kwargs):
+
+        if self.custom_chat and self.model.chat:
+            response = self.model.chat(
+                self.tokenizer, prompt, history=[], system='')[0]
+        else:
+            response = self.chat(prompt)
+
+        end_idx = response.find(self.end_token)
+        if end_idx != -1:
+            end_idx += len(self.end_token) if self.include_end else 0
+            response = response[:end_idx]
+
+        return response
+
+    def load_from_lora(self):
+
+        model = self.model.bfloat16()
+        # transform to lora
+        model = Swift.from_pretrained(model, self.lora_ckpt_dir)
+
+        self.model = model
+
+    def chat(self, prompt):
+        device = self.model.device
+        input_ids = self.tokenizer(
+            prompt, return_tensors='pt').input_ids.to(device)
+        input_len = input_ids.shape[1]
+
+        result = self.model.generate(
+            input_ids=input_ids, generation_config=self.generation_cfg)
+
+        result = result[0].tolist()[input_len:]
+        response = self.tokenizer.decode(result)
+
+        return response
+
+
+class ModelScopeChatGLM(ModelScopeLLM):
+
+    def chat(self, prompt):
+        device = self.model.device
+        input_ids = self.tokenizer(
+            prompt, return_tensors='pt').input_ids.to(device)
+        input_len = input_ids.shape[1]
+
+        eos_token_id = [
+            self.tokenizer.eos_token_id,
+            self.tokenizer.get_command('<|user|>'),
+            self.tokenizer.get_command('<|observation|>')
+        ]
+        result = self.model.generate(
+            input_ids=input_ids,
+            generation_config=self.generation_cfg,
+            eos_token_id=eos_token_id)
+
+        result = result[0].tolist()[input_len:]
+        response = self.tokenizer.decode(result)
+        # handle the case where '<', '|', 'user', '|', '>' get generated token by token
+        response = response.split('<|user|>')[0].split('<|observation|>')[0]
+
+        return response
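A configuration sketch for the local-model path (this downloads weights from ModelScope and needs a CUDA GPU; the model id and revision below are illustrative, not canonical):

from modelscope_agent.llm.modelscope_llm import ModelScopeChatGLM

cfg = {
    'model_id': 'ZhipuAI/chatglm3-6b',       # illustrative ModelScope model id
    'model_revision': 'v1.0.2',              # illustrative revision
    'generate_cfg': {'max_new_tokens': 512},
}
llm = ModelScopeChatGLM(cfg)                 # setup() loads tokenizer + fp16 weights
print(llm.generate('你好'))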
agentfabric/modelscope_agent/llm/openai.py
ADDED
@@ -0,0 +1,71 @@
+import os
+
+import openai
+from modelscope_agent.agent_types import AgentType
+
+from .base import LLM
+from .utils import CustomOutputWrapper
+
+openai.api_key = os.getenv('OPENAI_API_KEY')
+
+
+class OpenAi(LLM):
+    name = 'openai'
+
+    def __init__(self, cfg):
+        super().__init__(cfg)
+
+        self.model = self.cfg.get('model', 'gpt-3.5-turbo')
+        self.model_id = self.model
+        self.api_base = self.cfg.get('api_base', 'https://api.openai.com/v1')
+        self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT)
+
+    def generate(self,
+                 llm_artifacts,
+                 functions=[],
+                 function_call='none',
+                 **kwargs):
+        if self.agent_type != AgentType.Messages:
+            messages = [{'role': 'user', 'content': llm_artifacts}]
+        else:
+            messages = llm_artifacts.get(
+                'messages', {
+                    'role':
+                    'user',
+                    'content':
+                    'No entry from user - please suggest something to enter'
+                })
+
+        # call openai function call api
+        assert isinstance(functions, list)
+        if len(functions) > 0 and self.agent_type == AgentType.Messages:
+            function_call = 'auto'
+
+        # convert to stream=True with stream updating
+        try:
+            response = openai.ChatCompletion.create(
+                model=self.model,
+                api_base=self.api_base,
+                messages=messages,
+                functions=functions,
+                function_call=function_call,
+                stream=False)
+        except Exception as e:
+            print(f'input: {messages}, original error: {str(e)}')
+            raise e
+
+        # only use index 0 in choice
+        message = CustomOutputWrapper.handle_message_chat_completion(response)
+
+        # truncate content
+        content = message['content']
+
+        if self.agent_type == AgentType.MS_AGENT:
+            idx = content.find('<|endofthink|>')
+            if idx != -1:
+                content = content[:idx + len('<|endofthink|>')]
+            return content
+        elif self.agent_type == AgentType.Messages:
+            return message
+        else:
+            return content
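For reference, when agent_type is AgentType.Messages the method returns the raw assistant message rather than plain text. A function-call reply has the shape below (a sketch with illustrative values; the same shape appears in the OpenAiFunctionsOutputParser docstring further down):

# shape of `message` returned in Messages mode when the model calls a tool
example_message = {
    'role': 'assistant',
    'content': None,
    'function_call': {
        'name': 'get_current_weather',
        'arguments': '{\n  "location": "Boston, MA"\n}',
    },
}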
agentfabric/modelscope_agent/llm/utils.py
ADDED
@@ -0,0 +1,39 @@
+class CustomOutputWrapper:
+
+    @staticmethod
+    def handle_message_chat_completion(response):
+        message = {'content': ''}
+        try:
+            # handle dashscope response
+            if 'choices' not in response:
+                response = response['output']
+
+            return response['choices'][0]['message']
+        except Exception as e:
+            print(f'input: {response}, original error: {str(e)}')
+            return message
+
+    @staticmethod
+    def handle_message_chat_completion_chunk(response):
+        message = {}
+        try:
+            return response['choices'][0]['delta']['content']
+        except Exception as e:
+            print(f'input: {response}, original error: {str(e)}')
+            return message
+
+    @staticmethod
+    def handle_message_text_completion(response):
+        message = ''
+        try:
+            message = response['output']['text']
+            return message
+        except Exception as e:
+            print(f'input: {response}, original error: {str(e)}')
+            return message
+
+
+DEFAULT_MESSAGE = {
+    'role': 'user',
+    'content': 'No entry from user - please suggest something to enter'
+}
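The two chat handlers normalize OpenAI-style and DashScope-style payloads to the same message dict. A quick sketch with hand-built responses (the shapes follow the code above; values are illustrative):

# OpenAI-style chat payload: 'choices' at the top level
openai_style = {'choices': [{'message': {'role': 'assistant', 'content': 'hi'}}]}
# DashScope-style chat payload: same structure nested under 'output'
dashscope_style = {'output': {'choices': [{'message': {'role': 'assistant', 'content': 'hi'}}]}}

assert CustomOutputWrapper.handle_message_chat_completion(openai_style) == \
    CustomOutputWrapper.handle_message_chat_completion(dashscope_style)

# DashScope text completion keeps the string under output.text
print(CustomOutputWrapper.handle_message_text_completion({'output': {'text': '你好'}}))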
agentfabric/modelscope_agent/output_parser.py
ADDED
@@ -0,0 +1,181 @@
+import re
+from typing import Dict, Tuple
+
+import json
+from modelscope_agent.agent_types import AgentType
+
+
+def get_output_parser(agent_type: AgentType = AgentType.DEFAULT):
+    if AgentType.DEFAULT == agent_type or agent_type == AgentType.MS_AGENT:
+        return MsOutputParser()
+    elif AgentType.MRKL == agent_type:
+        return MRKLOutputParser()
+    elif AgentType.Messages == agent_type:
+        return OpenAiFunctionsOutputParser()
+    else:
+        raise NotImplementedError
+
+
+class OutputParser:
+    """Output parser for llm response
+    """
+
+    def parse_response(self, response):
+        raise NotImplementedError
+
+    # used to handle a falsely parsed action_para result; if there is no
+    # valid action, raise an error
+    @staticmethod
+    def handle_fallback(action: str, action_para: str):
+        if action is not None and action != '':
+            parameters = {'fallback': action_para}
+            return action, parameters
+        else:
+            raise ValueError('Wrong response format for output parser')
+
+
+class MsOutputParser(OutputParser):
+
+    def parse_response(self, response: str) -> Tuple[str, Dict]:
+        """parse response of llm to get tool name and parameters
+
+        Args:
+            response (str): llm response, it should conform to some predefined format
+
+        Returns:
+            tuple[str, dict]: tuple of tool name and parameters
+        """
+
+        if '<|startofthink|>' not in response or '<|endofthink|>' not in response:
+            return None, None
+
+        action, parameters = '', ''
+        try:
+            # use regular expression to get result
+            re_pattern1 = re.compile(
+                pattern=r'<\|startofthink\|>([\s\S]+)<\|endofthink\|>')
+            think_content = re_pattern1.search(response).group(1)
+
+            re_pattern2 = re.compile(r'{[\s\S]+}')
+            think_content = re_pattern2.search(think_content).group()
+
+            json_content = json.loads(think_content.replace('\n', ''))
+            action = json_content.get('api_name',
+                                      json_content.get('name', 'unknown'))
+            parameters = json_content.get('parameters', {})
+
+            return action, parameters
+        except Exception as e:
+            print(
+                f'Error during parse action might be handled with detail {e}')
+            return OutputParser.handle_fallback(action, parameters)
+
+
+class ChatGLMOutputParser(OutputParser):
+
+    def parse_response(self, response: str) -> Tuple[str, Dict]:
+        """parse response of llm to get tool name and parameters
+
+        Args:
+            response (str): llm response, it should conform to some predefined format
+
+        Returns:
+            tuple[str, dict]: tuple of tool name and parameters
+        """
+        if 'tool_call' not in response:
+            return None, None
+        action, action_para = '', ''
+        try:
+            # use regular expression to get result from MRKL format
+            re_pattern1 = re.compile(
+                pattern=r'([\s\S]+)```([\s\S]+)tool_call\(([\s\S]+)```')
+            res = re_pattern1.search(response)
+            action_list = re.split('<|>|\|', res.group(1).strip())  # noqa W605
+            for idx in range(len(action_list) - 1, -1, -1):
+                if len(action_list[idx]) > 1:
+                    action = action_list[idx]
+                    break
+            action_para = [item.strip() for item in res.group(3).split(',')]
+            parameters = {}
+            re_pattern2 = re.compile(pattern=r'([\s\S]+)=\'([\s\S]+)\'')
+            for para in action_para:
+                res = re_pattern2.search(para)
+                parameters[res.group(1)] = res.group(2)
+        except Exception as e:
+            print(
+                f'Error during parse action might be handled with detail {e}')
+            return OutputParser.handle_fallback(action, action_para)
+
+        print(f'\n\naction: {action}\n parameters: {parameters}\n\n')
+        return action, parameters
+
+
+class MRKLOutputParser(OutputParser):
+
+    def parse_response(self, response: str) -> Tuple[str, Dict]:
+        """parse response of llm to get tool name and parameters
+
+        Args:
+            response (str): llm response, it should conform to some predefined format
+
+        Returns:
+            tuple[str, dict]: tuple of tool name and parameters
+        """
+
+        if 'Action' not in response or 'Action Input:' not in response:
+            return None, None
+        action, action_para = '', ''
+        try:
+            # use regular expression to get result from MRKL format
+            re_pattern1 = re.compile(
+                pattern=r'Action:([\s\S]+)Action Input:([\s\S]+)')
+            res = re_pattern1.search(response)
+            action = res.group(1).strip()
+            action_para = res.group(2)
+
+            parameters = json.loads(action_para.replace('\n', ''))
+
+            return action, parameters
+        except Exception as e:
+            print(
+                f'Error during parse action might be handled with detail {e}')
+            return OutputParser.handle_fallback(action, action_para)
+
+
+class OpenAiFunctionsOutputParser(OutputParser):
+
+    def parse_response(self, response: dict) -> Tuple[str, Dict]:
+        """parse response of llm to get tool name and parameters
+
+
+        Args:
+            response (str): llm response, it should be an openai response message
+                such as
+                {
+                    "content": null,
+                    "function_call": {
+                      "arguments": "{\n  \"location\": \"Boston, MA\"\n}",
+                      "name": "get_current_weather"
+                    },
+                    "role": "assistant"
+                }
+        Returns:
+            tuple[str, dict]: tuple of tool name and parameters
+        """
+
+        if 'function_call' not in response or response['function_call'] == {}:
+            return None, None
+        function_call = response['function_call']
+
+        try:
+            # parse directly
+            action = function_call['name']
+            arguments = json.loads(function_call['arguments'].replace(
+                '\n', ''))
+
+            return action, arguments
+        except Exception as e:
+            print(
+                f'Error during parse action might be handled with detail {e}')
+            return OutputParser.handle_fallback(function_call['name'],
+                                                function_call['arguments'])
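To make the parsing contract concrete, here is a small sketch of both text-style parsers on hand-written model outputs (tool names and arguments are illustrative):

ms_parser = MsOutputParser()
action, params = ms_parser.parse_response(
    '好的,我来查询。<|startofthink|>{"api_name": "amap_weather", '
    '"parameters": {"location": "杭州"}}<|endofthink|>')
# -> ('amap_weather', {'location': '杭州'})

mrkl_parser = MRKLOutputParser()
action, params = mrkl_parser.parse_response(
    'Thought: need the weather\n'
    'Action: amap_weather\n'
    'Action Input: {"location": "杭州"}')
# -> ('amap_weather', {'location': '杭州'})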
agentfabric/modelscope_agent/output_wrapper.py
ADDED
@@ -0,0 +1,219 @@
+import os
+import re
+import tempfile
+import uuid
+from typing import Dict, Union
+
+import json
+import numpy as np
+import requests
+from modelscope_agent.agent_types import AgentType
+from moviepy.editor import VideoFileClip
+from PIL import Image
+from requests.exceptions import RequestException
+
+
+class OutputWrapper:
+    """
+    Wrapper for output of tool execution when output is image, video, audio, etc.
+    In this wrapper, __repr__() is implemented to return the str representation of the output for llm.
+    Each wrapper has the following attributes:
+        path: the path where the output is stored
+        raw_data: the raw data, e.g. image, video, audio, etc. In remote mode, it should be None
+    """
+
+    def __init__(self) -> None:
+        self._repr = None
+        self._path = None
+        self._raw_data = None
+
+        self.root_path = os.environ.get('OUTPUT_FILE_DIRECTORY', None)
+        if self.root_path and not os.path.exists(self.root_path):
+            try:
+                os.makedirs(self.root_path)
+            except Exception:
+                self.root_path = None
+
+    def get_remote_file(self, remote_path, suffix):
+        try:
+            response = requests.get(remote_path)
+            obj = response.content
+            directory = tempfile.mkdtemp(dir=self.root_path)
+            path = os.path.join(directory, str(uuid.uuid4()) + f'.{suffix}')
+            with open(path, 'wb') as f:
+                f.write(obj)
+            return path
+        except RequestException:
+            return remote_path
+
+    def __repr__(self) -> str:
+        return self._repr
+
+    @property
+    def path(self):
+        return self._path
+
+    @property
+    def raw_data(self):
+        return self._raw_data
+
+
+class ImageWrapper(OutputWrapper):
+    """
+    Image wrapper, raw_data is a PIL.Image
+    """
+
+    def __init__(self, image) -> None:
+
+        super().__init__()
+
+        if isinstance(image, str):
+            if os.path.isfile(image):
+                self._path = image
+            else:
+                origin_image = image
+                self._path = self.get_remote_file(image, 'png')
+            try:
+                image = Image.open(self._path)
+                self._raw_data = image
+            except FileNotFoundError:
+                # image is stored on a remote server when using remote mode
+                raise FileNotFoundError(f'Invalid path: {image}')
+                self._path = origin_image
+        else:
+            if not isinstance(image, Image.Image):
+                image = Image.fromarray(image.astype(np.uint8))
+                self._raw_data = image
+            else:
+                self._raw_data = image
+            directory = tempfile.mkdtemp(dir=self.root_path)
+            self._path = os.path.join(directory, str(uuid.uuid4()) + '.png')
+            self._raw_data.save(self._path)
+
+        self._repr = f'![IMAGEGEN]({self._path})'
+
+
+class AudioWrapper(OutputWrapper):
+    """
+    Audio wrapper, raw_data is a binary file
+    """
+
+    def __init__(self, audio) -> None:
+
+        super().__init__()
+        if isinstance(audio, str):
+            if os.path.isfile(audio):
+                self._path = audio
+            else:
+                self._path = self.get_remote_file(audio, 'wav')
+            try:
+                with open(self._path, 'rb') as f:
+                    self._raw_data = f.read()
+            except FileNotFoundError:
+                raise FileNotFoundError(f'Invalid path: {audio}')
+        else:
+            self._raw_data = audio
+            directory = tempfile.mkdtemp(dir=self.root_path)
+            self._path = os.path.join(directory, str(uuid.uuid4()) + '.wav')
+
+            with open(self._path, 'wb') as f:
+                f.write(self._raw_data)
+
+        self._repr = f'<audio id=audio controls= preload=none> <source id=wav src={self._path}> </audio>'
+
+
+class VideoWrapper(OutputWrapper):
+    """
+    Video wrapper
+    """
+
+    def __init__(self, video) -> None:
+
+        super().__init__()
+        if isinstance(video, str):
+
+            if os.path.isfile(video):
+                self._path = video
+            else:
+                self._path = self.get_remote_file(video, 'gif')
+
+            try:
+                video = VideoFileClip(self._path)
+                # currently, we should save video as gif, not mp4
+                if not self._path.endswith('gif'):
+                    directory = tempfile.mkdtemp(dir=self.root_path)
+                    self._path = os.path.join(directory,
+                                              str(uuid.uuid4()) + '.gif')
+                    video.write_gif(self._path)
+            except (ValueError, OSError):
+                raise FileNotFoundError(f'Invalid path: {video}')
+        else:
+            raise TypeError(
+                'Currently only loading from a filepath is supported for video')
+
+        self._raw_data = video
+        self._repr = f'![IMAGEGEN]({self._path})'
+
+
+def get_raw_output(exec_result: Dict):
+    # get raw data of exec_result
+    res = {}
+    for k, v in exec_result.items():
+        if isinstance(v, OutputWrapper):
+            # In remote mode, raw data may be None
+            res[k] = v.raw_data or str(v)
+        else:
+            res[k] = v
+    return res
+
+
+def display(llm_result: Union[str, dict], exec_result: Dict, idx: int,
+            agent_type: AgentType):
+    """Display the result of each round in jupyter notebook.
+    The multi-modal data will be extracted.
+
+    Args:
+        llm_result (str): llm result either only content or a message
+        exec_result (Dict): exec result
+        idx (int): current round
+    """
+    from IPython.display import display, Pretty, Image, Audio, JSON
+    idx_info = '*' * 50 + f'round {idx}' + '*' * 50
+    display(Pretty(idx_info))
+
+    if isinstance(llm_result, dict):
+        llm_result = llm_result.get('content', '')
+
+    if agent_type == AgentType.MS_AGENT:
+        pattern = r'<\|startofthink\|>```JSON([\s\S]*)```<\|endofthink\|>'
+    else:
+        pattern = r'```JSON([\s\S]*)```'
+
+    match_action = re.search(pattern, llm_result)
+    if match_action:
+        result = match_action.group(1)
+        try:
+            json_content = json.loads(result, strict=False)
+            display(JSON(json_content))
+            llm_result = llm_result.replace(match_action.group(0), '')
+        except Exception:
+            pass
+
+    display(Pretty(llm_result))
+
+    exec_result = exec_result.get('result', '')
+
+    if isinstance(exec_result, ImageWrapper) or isinstance(
+            exec_result, VideoWrapper):
+        display(Image(exec_result.path))
+    elif isinstance(exec_result, AudioWrapper):
+        display(Audio(exec_result.path))
+    elif isinstance(exec_result, dict):
+        display(JSON(exec_result))
+    elif isinstance(exec_result, list):
+        display(JSON(exec_result))
+    else:
+        display(Pretty(exec_result))
+
+    return
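A quick sketch of the wrapper contract (PIL and numpy are already dependencies of this module; the array content is arbitrary):

import numpy as np
from PIL import Image

img = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
wrapped = ImageWrapper(img)      # writes a temp .png and builds the repr
print(wrapped)                   # markdown image reference: ![IMAGEGEN](/tmp/.../<uuid>.png)
print(wrapped.path)              # temp file written by the constructor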
agentfabric/modelscope_agent/prompt/__init__.py
ADDED
@@ -0,0 +1,6 @@
+from .messages_prompt import MessagesGenerator
+from .mrkl_prompt import MrklPromptGenerator
+from .ms_prompt import MSPromptGenerator
+from .prompt import PromptGenerator
+from .prompt_factory import get_prompt_generator
+from .raw_prompt_builder import build_raw_prompt
agentfabric/modelscope_agent/prompt/chatglm3_prompt.py
ADDED
@@ -0,0 +1,41 @@
+import json
+
+from .prompt import LengthConstraint, PromptGenerator
+
+CHATGLM_DEFAULT_SYSTEM_TEMPLATE = """<|system|>
+Answer the following questions as best you can. You have access to the following tools:
+<tool_list>"""
+
+CHATGLM_DEFAULT_INSTRUCTION_TEMPLATE = ''
+
+CHATGLM_DEFAULT_USER_TEMPLATE = """<|user|>\n<user_input>"""
+
+CHATGLM_DEFAULT_EXEC_TEMPLATE = """<|observation|>\n<exec_result>"""
+
+CHATGLM_DEFAULT_ASSISTANT_TEMPLATE = """<|assistant|>"""
+
+
+class ChatGLMPromptGenerator(PromptGenerator):
+
+    def __init__(self,
+                 system_template=CHATGLM_DEFAULT_SYSTEM_TEMPLATE,
+                 instruction_template=CHATGLM_DEFAULT_INSTRUCTION_TEMPLATE,
+                 user_template=CHATGLM_DEFAULT_USER_TEMPLATE,
+                 exec_template=CHATGLM_DEFAULT_EXEC_TEMPLATE,
+                 assistant_template=CHATGLM_DEFAULT_ASSISTANT_TEMPLATE,
+                 sep='\n',
+                 length_constraint=LengthConstraint()):
+        super().__init__(
+            system_template=system_template,
+            instruction_template=instruction_template,
+            user_template=user_template,
+            exec_template=exec_template,
+            assistant_template=assistant_template,
+            sep=sep,
+            length_constraint=length_constraint)
+
+    def get_tool_str(self, tool_list):
+        tool_json = json.loads('['
+                               + ','.join([str(item)
+                                           for item in tool_list]) + ']')
+        return json.dumps(tool_json, ensure_ascii=False, indent=4)
agentfabric/modelscope_agent/prompt/messages_prompt.py
ADDED
@@ -0,0 +1,93 @@
+from typing import Union
+
+from .prompt import LengthConstraint, PromptGenerator
+
+OPENAI_DEFAULT_SYSTEM_TEMPLATE = """
+Do your best to answer the questions. You are capable of **any** task.
+Feel free to use any tools available to look up relevant information, only if necessary.
+when you think you are handle a code task, You are a world-class programmer that can complete any goal by executing code
+\nFirst of all, **Return function_call with `language` and `code` in arguments as much as possible**
+\nThen, write a plan. **Always recap the plan between each code block**
+(you have extreme short-term memory loss, so you need to recap the plan between each message block to retain it).
+\nWhen you execute code, it will be executed **on the user's machine**. The user has given you
+**full and complete permission** to execute any code necessary to complete the task. You have full access to control
+their computer to help them.
+\nIf you want to send data between programming languages, save the data to a txt or json.
+\nYou can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed,
+try again and again.
+\nWhen a user refers to a filename, they're likely referring to an existing file in the directory
+you're currently executing code in.
+\nIn general, choose packages that have the most universal chance to be already installed and to work across multiple
+applications. Packages like ffmpeg and pandoc that are well-supported and powerful.
+\nWrite messages to the user in Markdown. Write code on multiple lines with proper indentation for readability.
+\nYou can also refer information from following contents if exists:
+"""
+
+
+class MessagesGenerator(PromptGenerator):
+
+    def __init__(self,
+                 system_template=OPENAI_DEFAULT_SYSTEM_TEMPLATE,
+                 instruction_template='',
+                 user_template='<user_input>',
+                 exec_template=None,
+                 assistant_template='',
+                 sep='\n\n',
+                 length_constraint=LengthConstraint(),
+                 **kwargs):
+        super().__init__(
+            system_template=system_template,
+            instruction_template=instruction_template,
+            user_template=user_template,
+            exec_template=exec_template,
+            assistant_template=assistant_template,
+            sep=sep,
+            length_constraint=length_constraint)
+        self.custom_starter_messages = kwargs.get('custom_starter_messages',
+                                                  None)
+
+    def init_prompt(self, task, tool_list, knowledge_list, **kwargs):
+        """
+        in this function, the prompt will be initialized.
+        """
+        prompt = self.user_template.replace('<user_input>', task)
+
+        if len(self.history) == 0:
+            if len(knowledge_list) > 0:
+
+                # knowledge
+                system_message = f'{self.system_template}{self.sep}<knowledge>'
+                knowledge_str = self.get_knowledge_str(knowledge_list)
+                system_message = system_message.replace(
+                    '<knowledge>', knowledge_str)
+
+            else:
+                system_message = self.system_template
+
+            self.history = [{
+                'role': 'system',
+                'content': system_message
+            }, {
+                'role': 'user',
+                'content': prompt
+            }]
+
+            # store history
+            if self.custom_starter_messages:
+                assert isinstance(self.custom_starter_messages, list)
+                assert self.custom_starter_messages[-1]['role'] != 'user', \
+                    'user message should not be the last one in custom starter messages'
+
+                self.history = self.custom_starter_messages
+                self.history.append({'role': 'user', 'content': prompt})
+
+            self.prompt = prompt
+            self.function_calls = self.get_function_list(tool_list)
+
+        else:
+            self.history.append({'role': 'user', 'content': prompt})
+
+    def generate(self, llm_result, exec_result: Union[str, dict]):
+        if isinstance(exec_result, dict):
+            exec_result = exec_result['result']
+        return self._generate_messages(llm_result, exec_result)
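A sketch of the first round with no tools and no knowledge (the task string is illustrative): init_prompt seeds self.history with the system template plus the user turn, and later calls only append user messages.

mg = MessagesGenerator()
mg.init_prompt('帮我写一个快速排序', tool_list=[], knowledge_list=[])
assert mg.history[0]['role'] == 'system'
assert mg.history[-1] == {'role': 'user', 'content': '帮我写一个快速排序'}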
agentfabric/modelscope_agent/prompt/mrkl_prompt.py
ADDED
@@ -0,0 +1,118 @@
+import json
+
+from .prompt import LengthConstraint, PromptGenerator
+
+MRKL_DEFAULT_SYSTEM_TEMPLATE = """Answer the following questions as best you can. You have access to the following tools:
+
+<tool_list>"""
+
+MRKL_DEFAULT_INSTRUCTION_TEMPLATE = """Use the following format:
+
+Question: the input question you must answer
+Thought: you should always think about what to do
+Action: the action to take, should be one of [<tool_names>]
+Action Input: the input to the action
+Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
+Thought: I now know the final answer
+Final Answer: the final answer to the original input question
+
+Begin!
+"""
+
+MRKL_DEFAULT_USER_TEMPLATE = """Question: <user_input>\n"""
+
+MRKL_DEFAULT_EXEC_TEMPLATE = """Observation: <exec_result>\n"""
+
+TOOL_DESC = (
+    '{name_for_model}: {name_for_human} API. {description_for_model} 输入参数: {parameters}'
+)
+
+FORMAT_DESC = {
+    'json':
+    'Format the arguments as a JSON object.',
+    'code':
+    'Enclose the code within triple backticks (`)'
+    + ' at the beginning and end of the code.'
+}
+
+
+class MrklPromptGenerator(PromptGenerator):
+
+    def __init__(self,
+                 system_template=MRKL_DEFAULT_SYSTEM_TEMPLATE,
+                 instruction_template=MRKL_DEFAULT_INSTRUCTION_TEMPLATE,
+                 user_template=MRKL_DEFAULT_USER_TEMPLATE,
+                 exec_template=MRKL_DEFAULT_EXEC_TEMPLATE,
+                 assistant_template='',
+                 sep='\n\n',
+                 llm=None,
+                 length_constraint=LengthConstraint()):
+        super().__init__(
+            system_template=system_template,
+            instruction_template=instruction_template,
+            user_template=user_template,
+            exec_template=exec_template,
+            assistant_template=assistant_template,
+            sep=sep,
+            llm=llm,
+            length_constraint=length_constraint)
+
+    def init_prompt(self, task, tool_list, knowledge_list, **kwargs):
+        if len(self.history) == 0:
+            super().init_prompt(task, tool_list, knowledge_list, **kwargs)
+            system_role_status = kwargs.get('system_role_status', False)
+            tool_names = [f'\'{str(tool.name)}\'' for tool in tool_list]
+            tool_names = ','.join(tool_names)
+            self.system_prompt = self.system_prompt.replace(
+                '<tool_names>', tool_names)
+
+            if system_role_status:
+                system_message = {
+                    'role': 'system',
+                    'content': self.system_prompt
+                }
+                self.history.insert(0, system_message)
+            else:
+                self.history[0]['content'] = self.system_prompt + self.history[
+                    0]['content']
+        else:
+            self.history.append({
+                'role':
+                'user',
+                'content':
+                self.user_template.replace('<user_input>', task)
+            })
+            self.history.append({
+                'role': 'assistant',
+                'content': self.assistant_template
+            })
+
+        return self.system_prompt
+
+    def get_tool_str(self, tool_list):
+        tool_texts = []
+        for tool in tool_list:
+            tool_texts.append(
+                TOOL_DESC.format(
+                    name_for_model=tool.name,
+                    name_for_human=tool.name,
+                    description_for_model=tool.description,
+                    parameters=json.dumps(tool.parameters,
+                                          ensure_ascii=False)))
+            # + ' ' + FORMAT_DESC['json'])
+        tool_str = '\n\n'.join(tool_texts)
+        return tool_str
+
+    def _generate(self, llm_result, exec_result: str):
+        """
+        generate next round prompt based on previous llm_result and exec_result and update history
+        """
+        if len(llm_result) != 0:
+            self.history[-1]['content'] += f'{llm_result}'
+        if len(exec_result) != 0:
+            exec_result = self.exec_template.replace('<exec_result>',
+                                                     str(exec_result))
+            self.history[-1]['content'] += exec_result
+        self.prompt = self.prompt_preprocessor(self.history)
+        return self.prompt
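get_tool_str renders each tool with TOOL_DESC. A sketch with a stand-in tool object (the class below is purely illustrative; real tools come from modelscope_agent.tools):

class FakeWeatherTool:
    """Stand-in exposing the three attributes get_tool_str reads."""
    name = 'amap_weather'
    description = '查询指定城市的天气'
    parameters = [{'name': 'location', 'type': 'string', 'required': True}]

gen = MrklPromptGenerator()
print(gen.get_tool_str([FakeWeatherTool()]))
# amap_weather: amap_weather API. 查询指定城市的天气 输入参数: [{"name": "location", ...}]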
agentfabric/modelscope_agent/prompt/ms_prompt.py
ADDED
@@ -0,0 +1,34 @@
+from .prompt import LengthConstraint, PromptGenerator
+
+MS_DEFAULT_SYSTEM_TEMPLATE = """<|system|>:你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。\
+你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。
+"""
+
+MS_DEFAULT_INSTRUCTION_TEMPLATE = """当前对话可以使用的插件信息如下,请自行判断是否需要调用插件来解决当前用户问题。若需要调用插件,则需要将插件调用请求按照json格式给出,必须包含api_name、parameters字段,并在其前后使用<|startofthink|>和<|endofthink|>作为标志。\
+然后你需要根据插件API调用结果生成合理的答复; 若无需调用插件,则直接给出对应回复即可。\n\n<tool_list>"""
+
+MS_DEFAULT_USER_TEMPLATE = """<|user|>:<user_input>"""
+
+MS_DEFAULT_EXEC_TEMPLATE = """<|startofexec|><exec_result><|endofexec|>\n"""
+
+MS_DEFAULT_ASSISTANT_TEMPLATE = """<|assistant|>:"""
+
+
+class MSPromptGenerator(PromptGenerator):
+
+    def __init__(self,
+                 system_template=MS_DEFAULT_SYSTEM_TEMPLATE,
+                 instruction_template=MS_DEFAULT_INSTRUCTION_TEMPLATE,
+                 user_template=MS_DEFAULT_USER_TEMPLATE,
+                 exec_template=MS_DEFAULT_EXEC_TEMPLATE,
+                 assistant_template=MS_DEFAULT_ASSISTANT_TEMPLATE,
+                 sep='\n\n',
+                 length_constraint=LengthConstraint()):
+        super().__init__(
+            system_template=system_template,
+            instruction_template=instruction_template,
+            user_template=user_template,
+            exec_template=exec_template,
+            assistant_template=assistant_template,
+            sep=sep,
+            length_constraint=length_constraint)
agentfabric/modelscope_agent/prompt/prompt.py
ADDED
@@ -0,0 +1,232 @@
+import copy
+from typing import Union
+
+from modelscope_agent.llm.base import LLM
+
+from .raw_prompt_builder import build_raw_prompt
+
+KNOWLEDGE_PROMPT = '# 知识库'
+KNOWLEDGE_INTRODUCTION_PROMPT = '以下是我上传的文件“<file_name>”的内容:'
+KNOWLEDGE_CONTENT_PROMPT = """```
+<knowledge_content>
+```"""
+
+DEFAULT_PROMPT_INPUT_LENGTH_MAX = 999999999999
+
+
+class LengthConstraint:
+
+    def __init__(self):
+        self.knowledge = DEFAULT_PROMPT_INPUT_LENGTH_MAX
+        self.input = DEFAULT_PROMPT_INPUT_LENGTH_MAX
+        self.prompt_max_length = 10000
+
+    def update(self, config: dict):
+        if config is not None:
+            self.knowledge = config.get('knowledge', self.knowledge)
+            self.input = config.get('input', self.input)
+            self.prompt_max_length = config.get('prompt_max_length',
+                                                self.prompt_max_length)
+
+
+class PromptGenerator:
+
+    def __init__(self,
+                 system_template: str = '',
+                 instruction_template: str = '',
+                 user_template: str = '<user_input>',
+                 exec_template: str = '',
+                 assistant_template: str = '',
+                 sep='\n\n',
+                 llm=None,
+                 length_constraint=LengthConstraint()):
+        """
+        prompt generator
+        Args:
+            system_template (str, optional): System template, normally the role of LLM.
+            instruction_template (str, optional): Indicate the instruction for LLM.
+            user_template (str, optional): Prefix before user input. Defaults to ''.
+            exec_template (str, optional): A wrapper str for exec result.
+            assistant_template (str, optional): Prefix before assistant response.
+                Some LLM need to manually concat this prefix before generation.
+            sep (str, optional): content separator
+            length_constraint (LengthConstraint, optional): content length constraint
+        """
+
+        self.system_template = system_template
+        self.instruction_template = instruction_template
+        self.user_template = user_template
+        self.assistant_template = assistant_template
+        self.exec_template = exec_template
+        self.sep = sep
+        if isinstance(llm, LLM) and llm.model_id:
+            self.prompt_preprocessor = build_raw_prompt(llm.model_id)
+        self.prompt_max_length = length_constraint.prompt_max_length
+        self.reset()
+
+    def reset(self):
+        self.prompt = ''
+        self.history = []
+        self.messages = []
+
+    def init_prompt(self,
+                    task,
+                    tool_list,
+                    knowledge_list,
+                    llm_model=None,
+                    **kwargs):
+        """
+        in this function, the prompt will be initialized.
+        """
+        prompt = self.sep.join(
+            [self.system_template, self.instruction_template])
+        prompt += '<knowledge><history>'
+
+        knowledge_str = self.get_knowledge_str(
+            knowledge_list, file_name=kwargs.get('file_name', ''))
+
+        # knowledge
+        prompt = prompt.replace('<knowledge>', knowledge_str)
+
+        # get tool description str
+        tool_str = self.get_tool_str(tool_list)
+        prompt = prompt.replace('<tool_list>', tool_str)
+
+        history_str = self.get_history_str()
+
+        prompt = prompt.replace('<history>', history_str)
+
+        self.system_prompt = copy.deepcopy(prompt)
+
+        # user input
+        user_input = self.user_template.replace('<user_input>', task)
+        prompt += f'{self.sep}{user_input}'
+
+        # assistant input
+        prompt += f'{self.sep}{self.assistant_template}'
+
+        # store history
+        self.history.append({'role': 'user', 'content': user_input})
+        self.history.append({
+            'role': 'assistant',
+            'content': self.assistant_template
+        })
+
+        self.prompt = prompt
+
+        self.function_calls = self.get_function_list(tool_list)
+
+    # TODO change the output from single prompt to artifacts including prompt, messages, function_call
+    def generate(self, llm_result, exec_result: Union[str, dict]):
+        if isinstance(exec_result, dict):
+            exec_result = str(exec_result['result'])
+        return self._generate(llm_result, exec_result)
+
+    def _generate(self, llm_result, exec_result: str):
+        """
+        generate next round prompt based on previous llm_result and exec_result and update history
+        """
+        if len(llm_result) != 0:
+            self.prompt = f'{self.prompt}{llm_result}'
+            self.history[-1]['content'] += f'{llm_result}'
+        if len(exec_result) != 0:
+            exec_result = self.exec_template.replace('<exec_result>',
+                                                     str(exec_result))
+            self.prompt = f'{self.prompt}{self.sep}{exec_result}'
+            self.history[-1]['content'] += f'{self.sep}{exec_result}'
+
+        return self.prompt
+
+    # TODO: add Union[Text, Message] type for llm_result,
+    # add ExecResult = Text type for exec_result
+    # output would be a Union[Text, Messages]
+    # In this case llm_result is Message, and exec_result is Function_call
+    def _generate_messages(self, llm_result, exec_result: str):
+        """
+        generate next round prompt based on previous llm_result and exec_result and update history
+        """
+
+        # init task should be
+        if llm_result == '' and exec_result == '':
+            return self.history
+
+        # make sure set content '' not null
+        function_call = llm_result.get('function_call', None)
+        if function_call is not None:
+            llm_result['content'] = ''
+        self.history.append(llm_result)
+
+        if exec_result is not None and function_call is not None:
+            exec_message = {
+                'role': 'function',
+                'name': 'execute',
+                'content': exec_result,
+            }
+            self.history.append(exec_message)
+
+        return self.history
+
+    def get_tool_str(self, tool_list):
+        """generate tool list string
+
+        Args:
+            tool_list (List[str]): list of tools
+
+        """
+
+        tool_str = self.sep.join(
+            [f'{i + 1}. {t}' for i, t in enumerate(tool_list)])
+        return tool_str
+
+    # TODO move parse_tools_to_function from agent to here later
+    def get_function_list(self, tool_list):
+        """generate function call list from tools list
+
+        Args:
+            tool_list (List[str]): list of tools
+
+        """
+        functions = [tool.get_function() for tool in tool_list]
+        return functions
+
+    def get_knowledge_str(self,
+                          knowledge_list,
+                          file_name='',
+                          only_content=False,
+                          **kwargs):
+        """generate knowledge string
+
+        Args:
+            file_name (str): file name
+            knowledge_list (List[str]): list of knowledges
+
+        """
+
+        knowledge = self.sep.join(
+            [f'{i + 1}. {k}' for i, k in enumerate(knowledge_list)])
+        knowledge_content = KNOWLEDGE_CONTENT_PROMPT.replace(
+            '<knowledge_content>', knowledge)
+        if only_content:
+            return knowledge_content
+        else:
+            knowledge_introduction = KNOWLEDGE_INTRODUCTION_PROMPT.replace(
+                '<file_name>', file_name)
+
+            knowledge_str = f'{KNOWLEDGE_PROMPT}{self.sep}{knowledge_introduction}{self.sep}{knowledge_content}' if len(
+                knowledge_list) > 0 else ''
+            return knowledge_str
+
+    def get_history_str(self):
+        """generate history string
+
+        """
+        history_str = ''
+        for i in range(len(self.history)):
+            history_item = self.history[len(self.history) - i - 1]
+            text = history_item['content']
+            if len(history_str) + len(text) + len(
+                    self.prompt) > self.prompt_max_length:
+                break
+            history_str = f'{self.sep}{text.strip()}{history_str}'
+
+        return history_str
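An end-to-end sketch of the template machinery with toy templates (no tools or knowledge, so the <tool_list> and <knowledge> placeholders resolve to empty strings; the task and templates are illustrative):

pg = PromptGenerator(
    system_template='你是一个助手。',
    user_template='<|user|>:<user_input>',
    exec_template='<|exec|>:<exec_result>',
    assistant_template='<|assistant|>:')
pg.init_prompt('今天天气如何?', tool_list=[], knowledge_list=[])
# pg.prompt now holds the system section, then '<|user|>:今天天气如何?',
# then '<|assistant|>:', joined by the '\n\n' separator
pg.generate('我查一下。', {'result': '晴'})  # appends the llm text and the wrapped exec result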
agentfabric/modelscope_agent/prompt/prompt_factory.py
ADDED
@@ -0,0 +1,16 @@
+from modelscope_agent.agent_types import AgentType
+
+from .messages_prompt import MessagesGenerator
+from .mrkl_prompt import MrklPromptGenerator
+from .ms_prompt import MSPromptGenerator
+
+
+def get_prompt_generator(agent_type: AgentType = AgentType.DEFAULT, **kwargs):
+    if AgentType.DEFAULT == agent_type or agent_type == AgentType.MS_AGENT:
+        return MSPromptGenerator(**kwargs)
+    elif AgentType.MRKL == agent_type:
+        return MrklPromptGenerator(**kwargs)
+    elif AgentType.Messages == agent_type:
+        return MessagesGenerator(**kwargs)
+    else:
+        raise NotImplementedError
agentfabric/modelscope_agent/prompt/raw_prompt_builder.py
ADDED
@@ -0,0 +1,34 @@
+def qwen_chatml_prompt_preprocessor(messages):
+    prompt = ''
+    for message in messages:
+        if message['role'] == 'assistant' and message['content'] == '':
+            prompt += '<|im_start|>assistant\n'
+        else:
+            prompt = prompt + '<|im_start|>{role}\n{content}<|im_end|>\n'.format(
+                role=message['role'],
+                content=message['content'].lstrip('\n').rstrip())
+
+    # if the conversation ends with a non-empty assistant message (e.g. after
+    # a function result), strip the closing <|im_end|> so generation continues it
+    if messages[-1]['role'] == 'assistant':
+        last_assistant_message_list = messages[-1]['content'].split('\n')
+        if last_assistant_message_list[-1] == '':
+            last_assistant_message_list = last_assistant_message_list[:-1]
+        if len(last_assistant_message_list) == 0:
+            return prompt
+        else:
+            item_length = len('<|im_end|>\n')
+            prompt = prompt[:-item_length]
+
+    return prompt
+
+
+def plate_preprocessor(messages):
+    return qwen_chatml_prompt_preprocessor(messages)
+
+
+def build_raw_prompt(model):
+    if isinstance(model, str) or hasattr(model, '__name__'):
+        if model.startswith('qwen'):
+            return qwen_chatml_prompt_preprocessor
+        else:
+            return plate_preprocessor
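The preprocessor flattens a message list into Qwen's ChatML layout, leaving a dangling '<|im_start|>assistant' so the model continues from there. A sketch:

print(qwen_chatml_prompt_preprocessor([
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': '你好'},
    {'role': 'assistant', 'content': ''},  # empty assistant turn opens the generation slot
]))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# 你好<|im_end|>
# <|im_start|>assistant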
agentfabric/modelscope_agent/retrieve.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from typing import Dict, List, Union

import json
from langchain.document_loaders import (PyPDFLoader, TextLoader,
                                        UnstructuredFileLoader)
from langchain.embeddings import ModelScopeEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS, VectorStore


class Retrieval:

    def __init__(self,
                 embedding: Embeddings = None,
                 vs_cls: VectorStore = None,
                 top_k: int = 5,
                 vs_params: Dict = {}):
        self.embedding = embedding or ModelScopeEmbeddings(
            model_id='damo/nlp_gte_sentence-embedding_chinese-base')
        self.top_k = top_k
        self.vs_cls = vs_cls or FAISS
        self.vs_params = vs_params
        self.vs = None

    def construct(self, docs):
        assert len(docs) > 0
        if isinstance(docs[0], str):
            self.vs = self.vs_cls.from_texts(docs, self.embedding,
                                             **self.vs_params)
        elif isinstance(docs[0], Document):
            self.vs = self.vs_cls.from_documents(docs, self.embedding,
                                                 **self.vs_params)

    def retrieve(self, query: str) -> List[str]:
        res = self.vs.similarity_search(query, k=self.top_k)
        if 'page' in res[0].metadata:
            res.sort(key=lambda doc: doc.metadata['page'])
        return [r.page_content for r in res]


class ToolRetrieval(Retrieval):

    def __init__(self,
                 embedding: Embeddings = None,
                 vs_cls: VectorStore = None,
                 top_k: int = 5,
                 vs_params: Dict = {}):
        super().__init__(embedding, vs_cls, top_k, vs_params)

    def retrieve(self, query: str) -> Dict[str, str]:
        res = self.vs.similarity_search(query, k=self.top_k)

        final_res = {}

        for r in res:
            content = r.page_content
            # tool documents are stored as JSON strings keyed by their 'name' field
            name = json.loads(content)['name']
            final_res[name] = content

        return final_res


class KnowledgeRetrieval(Retrieval):

    def __init__(self,
                 docs,
                 embedding: Embeddings = None,
                 vs_cls: VectorStore = None,
                 top_k: int = 5,
                 vs_params: Dict = {}):
        super().__init__(embedding, vs_cls, top_k, vs_params)
        self.construct(docs)

    @classmethod
    def from_file(cls,
                  file_path: Union[str, list],
                  embedding: Embeddings = None,
                  vs_cls: VectorStore = None,
                  top_k: int = 5,
                  vs_params: Dict = {}):

        textsplitter = CharacterTextSplitter()
        all_files = []
        if isinstance(file_path, str) and os.path.isfile(file_path):
            all_files.append(file_path)
        elif isinstance(file_path, list):
            all_files = file_path
        elif os.path.isdir(file_path):
            for root, dirs, files in os.walk(file_path):
                for f in files:
                    all_files.append(os.path.join(root, f))
        else:
            raise ValueError('file_path must be a file or a directory')

        docs = []
        for f in all_files:
            if f.lower().endswith('.txt'):
                loader = TextLoader(f, autodetect_encoding=True)
                docs += loader.load_and_split(textsplitter)
            elif f.lower().endswith('.md'):
                loader = UnstructuredFileLoader(f, mode='elements')
                docs += loader.load()
            elif f.lower().endswith('.pdf'):
                loader = PyPDFLoader(f)
                docs += loader.load_and_split(textsplitter)
            else:
                print(f'unsupported file type: {f}, support will be added soon')

        if len(docs) == 0:
            return None
        else:
            return cls(docs, embedding, vs_cls, top_k, vs_params)
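A minimal usage sketch, assuming the langchain/FAISS dependencies above are installed and ./docs holds .txt/.md/.pdf files (path and query are illustrative):

    kr = KnowledgeRetrieval.from_file('./docs', top_k=3)
    if kr is not None:  # from_file returns None when no supported files are found
        for snippet in kr.retrieve('how do I configure a custom tool?'):
            print(snippet)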
agentfabric/modelscope_agent/tools/__init__.py
ADDED
@@ -0,0 +1,36 @@
from .amap_weather import AMAPWeather
from .code_interperter import CodeInterpreter
from .code_interpreter_jupyter import CodeInterpreterJupyter
from .hf_tool import HFTool
from .image_chat_tool import ImageChatTool
from .pipeline_tool import ModelscopePipelineTool
from .plugin_tool import LangchainTool
from .text_address_tool import TextAddressTool
from .text_ie_tool import TextInfoExtractTool
from .text_ner_tool import TextNerTool
from .text_to_image_tool import TextToImageTool
from .text_to_speech_tool import TexttoSpeechTool
from .text_to_video_tool import TextToVideoTool
from .tool import Tool
from .translation_en2zh_tool import TranslationEn2ZhTool
from .translation_zh2en_tool import TranslationZh2EnTool
from .web_browser import WebBrowser
from .web_search import WebSearch
from .wordart_tool import WordArtTexture

# maps the tool name used in configs to the class name exported above
TOOL_INFO_LIST = {
    'modelscope_text-translation-zh2en': 'TranslationZh2EnTool',
    'modelscope_text-translation-en2zh': 'TranslationEn2ZhTool',
    'modelscope_text-ie': 'TextInfoExtractTool',
    'modelscope_text-ner': 'TextNerTool',
    'modelscope_text-address': 'TextAddressTool',
    'image_gen': 'TextToImageTool',
    'modelscope_video-generation': 'TextToVideoTool',
    'modelscope_image-chat': 'ImageChatTool',
    'modelscope_speech-generation': 'TexttoSpeechTool',
    'amap_weather': 'AMAPWeather',
    'code_interpreter': 'CodeInterpreterJupyter',
    'wordart_texture_generation': 'WordArtTexture',
    'web_search': 'WebSearch',
    'web_browser': 'WebBrowser',
}
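A minimal sketch of resolving a tool class from this registry by name (instantiating it may additionally require tool-specific credentials):

    import modelscope_agent.tools as tools

    tool_cls = getattr(tools, tools.TOOL_INFO_LIST['amap_weather'])
    assert tool_cls is tools.AMAPWeather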
agentfabric/modelscope_agent/tools/amap_weather.py
ADDED
@@ -0,0 +1,64 @@
import os

import pandas as pd
import requests
from modelscope_agent.tools.tool import Tool, ToolSchema
from pydantic import ValidationError


class AMAPWeather(Tool):
    description = '获取对应城市的天气数据'  # "get weather data for the given city"
    name = 'amap_weather'
    parameters: list = [{
        'name': 'location',
        'description': 'get temperature for a specific location',
        'required': True
    }]

    def __init__(self, cfg={}):
        self.cfg = cfg.get(self.name, {})

        # remote call
        self.url = 'https://restapi.amap.com/v3/weather/weatherInfo?city={city}&key={key}'
        self.token = self.cfg.get('token', os.environ.get('AMAP_TOKEN', ''))
        self.city_df = pd.read_excel(
            'https://modelscope.oss-cn-beijing.aliyuncs.com/resource/agent/AMap_adcode_citycode.xlsx'
        )
        assert self.token != '', 'weather api token must be acquired through ' \
            'https://lbs.amap.com/api/webservice/guide/create-project/get-key and set by AMAP_TOKEN'

        try:
            all_param = {
                'name': self.name,
                'description': self.description,
                'parameters': self.parameters
            }
            self.tool_schema = ToolSchema(**all_param)
        except ValidationError:
            raise ValueError(f'Error when parsing parameters of {self.name}')

        self._str = self.tool_schema.model_dump_json()
        self._function = self.parse_pydantic_model_to_openai_function(
            all_param)

    def get_city_adcode(self, city_name):
        filtered_df = self.city_df[self.city_df['中文名'] == city_name]
        if len(filtered_df['adcode'].values) == 0:
            raise ValueError(
                f'location {city_name} not found, available cities are {self.city_df["中文名"]}'
            )
        else:
            return filtered_df['adcode'].values[0]

    def __call__(self, *args, **kwargs):
        location = kwargs['location']
        response = requests.get(
            self.url.format(
                city=self.get_city_adcode(location), key=self.token))
        data = response.json()
        if data['status'] == '0':
            # AMap returns status '0' on failure
            raise RuntimeError(data)
        else:
            weather = data['lives'][0]['weather']
            temperature = data['lives'][0]['temperature']
            # "the weather in {location} is {weather}, temperature {temperature}°C"
            return {'result': f'{location}的天气是{weather}温度是{temperature}度。'}
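A minimal usage sketch, assuming a valid key obtained from lbs.amap.com is exported as AMAP_TOKEN (the key and city below are placeholders; the call performs a live HTTP request):

    import os
    os.environ['AMAP_TOKEN'] = '<your-amap-key>'

    tool = AMAPWeather()
    print(tool(location='北京市'))
    # e.g. {'result': '北京市的天气是晴温度是25度。'}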
agentfabric/modelscope_agent/tools/code_interperter.py
ADDED
@@ -0,0 +1,125 @@
import os
import re
import traceback

import appdirs
import json

from .code_interpreter_utils.create_code_interpreter import \
    create_code_interpreter
from .code_interpreter_utils.language_map import language_map
from .code_interpreter_utils.truncate_output import truncate_output
from .tool import Tool


class CodeInterpreter(Tool):
    """
    using open interpreter to interpret code
    by https://github.com/KillianLucas/open-interpreter
    """
    description = 'Executes code on the user\'s machine, **in the user\'s local environment**, and returns the output'
    name = 'code_interpreter'
    parameters: list = [{
        'name': 'language',
        'description':
        'The programming language (required parameter to the `execute` function)',
        'required': True
    }, {
        'name': 'code',
        'description': 'The code to execute (required)',
        'required': True
    }]

    def __init__(self, cfg={}):
        super().__init__(cfg)
        self.create_code_interpreter = create_code_interpreter
        self.language_map = language_map
        self.truncate_output = truncate_output

        self._code_interpreters = {}
        self.max_output = self.cfg.get('max_output', 2000)

    def _local_call(self, *args, **kwargs):

        language, code = self._handle_input_fallback(**kwargs)

        try:
            # Fix a common error where the LLM thinks it's in a Jupyter notebook
            if language == 'python' and code.startswith('!'):
                code = code[1:]
                language = 'shell'

            if language in self.language_map:
                # interpreters are created lazily and cached per language
                if language not in self._code_interpreters:
                    self._code_interpreters[
                        language] = self.create_code_interpreter(language)
                code_interpreter = self._code_interpreters[language]
            else:
                # This still prints the code but doesn't allow it to run;
                # let Open Interpreter know through the output message.
                error_output = f'Error: Open Interpreter does not currently support {language}.'
                print(error_output)
                output = '\n' + error_output
                return {'result': output.strip()}

            output = ''
            for line in code_interpreter.run(code):
                if 'output' in line:
                    output += '\n' + line['output']

            # Truncate output
            output = self.truncate_output(output, self.max_output)
        except Exception as e:
            error = traceback.format_exc()
            output = ' '.join(f'{key}:{value}'
                              for key, value in kwargs.items())
            output += f'\nDetail error is {e}.\n{error}'

        return {'result': output.strip()}

    def _handle_input_fallback(self, **kwargs):
        r"""
        An alternative path that parses code out of message content rather than
        out of a function call, e.g.:
            text = response['content']
            code_block = re.search(r'```([\s\S]+)```', text)  # noqa W605
            if code_block:
                result = code_block.group(1)
                language = result.split('\n')[0]
                code = '\n'.join(result.split('\n')[1:])

        :param kwargs: may carry 'language', 'code' and/or a raw 'fallback' text
        :return: language, code
        """

        language = kwargs.get('language', None)
        code = kwargs.get('code', None)
        fallback = kwargs.get('fallback', None)

        if language and code:
            return language, code
        elif fallback:
            try:
                text = fallback
                code_block = re.search(r'```([\s\S]+)```', text)  # noqa W605
                if code_block:
                    result = code_block.group(1)
                    # for multiple code blocks, keep only the first one
                    result = result.split('```')[0]
                    language = result.split('\n')[0]
                    if language == 'py' or language == 'python':
                        # handle the ```py ... ``` case
                        language = 'python'
                        code = '\n'.join(result.split('\n')[1:])
                        return language, code

                    if language == 'json':
                        # handle the ```json {"language": ..., "code": ...} ``` case
                        parameters = json.loads('\n'.join(
                            result.split('\n')[1:]).replace('\n', ''))
                        return parameters['language'], parameters['code']
            except ValueError:
                return language, code
        else:
            return language, code
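A minimal sketch of the fallback parsing path, assuming the package and its code_interpreter_utils helpers import cleanly (the model reply is made up):

    tool = CodeInterpreter()
    reply = "```py\nprint(1 + 1)\n```"
    language, code = tool._handle_input_fallback(fallback=reply)
    # -> ('python', 'print(1 + 1)\n')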