Commit 74cb225
sci-m-wang committed
Parent(s): 56fa864
Upload 15 files
Files changed:
- app.py +33 -0
- models/openai.py +25 -0
- models/transformers.py +56 -0
- modules/background.py +13 -0
- modules/command.py +13 -0
- modules/constraints.py +14 -0
- modules/get_modules.py +75 -0
- modules/goal.py +13 -0
- modules/initialization.py +13 -0
- modules/output_format.py +13 -0
- modules/skills.py +13 -0
- modules/suggestion.py +13 -0
- modules/workflow.py +13 -0
- showcases/generate.py +177 -0
- showcases/test.py +44 -0
app.py
ADDED
@@ -0,0 +1,33 @@
+from showcases.generate import generate
+from showcases.test import test
+from models.openai import Generator
+import streamlit as st
+import os
+
+os.system("pip install openai")
+# os.system("pip install transformers")
+# os.system("pip install outlines")
+# os.system("pip install pydantic")
+
+api_key = os.getenv("api_key")
+base_url = os.getenv("base_url")
+model_name = os.getenv("model_name")
+
+if __name__ == "__main__":
+    state = st.session_state
+    if "generator" not in state:
+        state.generator = Generator(
+            api_key = api_key,
+            base_url = base_url
+        )
+        state.generator.set_model(model_name)
+        pass
+    if "page" not in state:
+        state.page = "generate"
+        pass
+    if state.page == "generate":
+        generate()
+        pass
+    elif state.page == "test":
+        test()
+        pass
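
The entry point reads its OpenAI-compatible endpoint from three environment variables; on a Space these come from the repository's secrets. A minimal local-run sketch (all values below are placeholders, not from this commit):

    import os
    # Placeholder credentials; on Hugging Face Spaces these are set as repository secrets.
    os.environ["api_key"] = "sk-placeholder"
    os.environ["base_url"] = "https://api.openai.com/v1"
    os.environ["model_name"] = "gpt-3.5-turbo"
    # Then launch with: streamlit run app.py
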
models/openai.py
ADDED
@@ -0,0 +1,25 @@
+from openai import OpenAI
+
+class Generator:
+    def __init__(self, api_key, base_url):
+        self.client = OpenAI(
+            api_key=api_key,
+            base_url=base_url
+        )
+        pass
+    def set_model(self, model):
+        self.model = model
+        pass
+    def generate_response(self, messages):
+        response = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages
+        )
+        return response.choices[0].message.content
+    def json_response(self, messages):
+        response = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages,
+            response_format={"type":"json_object"}
+        )
+        return response.choices[0].message.content
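
Generator is a thin wrapper over the OpenAI SDK's chat-completions call. A usage sketch (endpoint, key, and model name are placeholders):

    from models.openai import Generator

    gen = Generator(api_key="sk-placeholder", base_url="https://api.openai.com/v1")
    gen.set_model("gpt-3.5-turbo")
    reply = gen.generate_response([{"role": "user", "content": "你好"}])
    # json_response works the same way but asks the endpoint for a JSON object
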
models/transformers.py
ADDED
@@ -0,0 +1,56 @@
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from outlines import models, generate
+
+from pydantic import BaseModel
+
+schema = """
+{
+    "title": "Modules",
+    "type": "object",
+    "properties": {
+        "background": {"type": "boolean"},
+        "command": {"type": "boolean"},
+        "suggestion": {"type": "boolean"},
+        "goal": {"type": "boolean"},
+        "examples": {"type": "boolean"},
+        "constraints": {"type": "boolean"},
+        "workflow": {"type": "boolean"},
+        "output_format": {"type": "boolean"},
+        "skills": {"type": "boolean"},
+        "style": {"type": "boolean"},
+        "initialization": {"type": "boolean"}
+    },
+    "required": ["background", "command", "suggestion", "goal", "examples", "constraints", "workflow", "output_format", "skills", "style", "initialization"]
+}
+"""
+
+class Generator:
+    def __init__(self, model_path, device):
+        self.llm = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code = True).to(device)
+        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code = True)
+        self.llm = self.llm.eval()
+        self.model = models.Transformers(self.llm, self.tokenizer)
+        pass
+    def generate_response(self, messages):
+        g = generate.text(self.model)
+        prompt = self.tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+        response = g(prompt)
+        return response
+    def json_response(self, messages):
+        g = generate.json(self.model, schema)
+        prompt = self.tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+        response = g(prompt)
+        return response
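
The local backend pairs a transformers chat model with outlines so that json_response only decodes strings matching the schema above. A usage sketch, assuming the outlines 0.x API used in this file; the model path and device are placeholders:

    from models.transformers import Generator

    gen = Generator("Qwen/Qwen1.5-1.8B-Chat", "cuda")  # placeholder model path and device
    modules = gen.json_response([{"role": "user", "content": "我希望LLM帮我执行的任务是:撰写科幻小说"}])
    # outlines constrains decoding, so the result conforms to the Modules schema
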
modules/background.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the background module of the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_background(client,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,生成任务的背景信息,以无序列表的格式输出。例如,当用户需要LLM玩谁是卧底游戏时,背景信息可能为:\n- 你正在参与一场谁是卧底游戏\n- 你的身份词是“黄桃”。"},
+    ] + messages
+    response = client.generate_response(messages)
+    return response
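
All of the modules/*.py generators below follow this same pattern: prepend a task-specific Chinese system prompt to the conversation and return the backend's bulleted answer. A call sketch, reusing the placeholder `gen` from the models/openai.py sketch above:

    from modules.background import gen_background

    messages = [{"role": "user", "content": "我希望LLM帮我执行的任务是:玩谁是卧底游戏"}]
    print(gen_background(gen, messages))  # e.g. "- 你正在参与一场谁是卧底游戏\n- ..."
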
modules/command.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the command module of the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_command(client,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,根据任务可能需要的动作为LLM创建命令提示词,以无序列表的格式输出,每个命令需要以“/”开头,然后连接命令符,之后是关于命令的解释。例如,当用户需要LLM玩谁是卧底游戏时,命令信息可能为:\n- /describe 请描述你的身份词\n- /vote 请投票给你认为是敌对阵营的玩家"},
+    ] + messages
+    response = client.generate_response(messages)
+    return response
modules/constraints.py
ADDED
@@ -0,0 +1,14 @@
+# Generate the constraints module needed for the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_constraints(client,messages):
+    messages=[
+        {"role": "system", "content": "用户正在编写提示词,你需要帮助用户完善提示词的约束部分。首先分析用户给出的任务,确定执行任务需要的约束。然后直接指定关于约束的具体细节,例如对于长度约束,你应该指定如“长度不要超过20词”这种具体的约束,而不是“预期字数范围”这种模糊的约束方向。如果用户没有给出具体的约束,根据你的经验与知识帮用户补全内容。以无序列表的格式输出,不要输出任何交互信息。例如,当用户需要LLM为论文构思一个题目时,约束信息可能为:\n- 题目长度不超过20字\n- 不能出现侮辱性的词汇\n- 要使用专业术语"},
+    ] + messages
+    response = client.generate_response(messages)
+    return response
+
modules/get_modules.py
ADDED
@@ -0,0 +1,75 @@
+# Description: Get the modules needed for the task
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+import json
+
+def get_modules(generator,messages):
+    '''
+    Get the modules needed for the task
+    :param generator: the model backend (a Generator instance)
+    :param messages: the task description messages
+    :return: The modules needed for the task
+    The correct format of the modules is:
+    {
+        background: bool,
+        command: bool,
+        suggestion: bool,
+        goal: bool,
+        examples: bool,
+        constraints: bool,
+        workflow: bool,
+        output_format: bool,
+        skills: bool,
+        style: bool,
+        initialization: bool
+    }
+    '''
+    default_modules = {
+        "background": True,
+        "command": False,
+        "suggestion": False,
+        "goal": True,
+        "examples": False,
+        "constraints": True,
+        "workflow": True,
+        "output_format": True,
+        "skills": False,
+        "style": False,
+        "initialization": True
+    }
+    ## Generate the modules needed for the task
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务类型,分析完整描述该任务所需的提示词需要的模块,例如:背景、目标、约束、命令、建议、任务样例、工作流程、输出格式、技能、风格、初始化等。按照json的格式输出,表示某个类是否需要,需要的类为True,不需要的类为False。例如,当需要背景、技能、工作流程、输出格式和初始化时,具体格式如下:{\"background\": True, \"command\": False, \"suggestion\": False, \"goal\": False, \"examples\": False, \"constraints\": False, \"workflow\": True, \"output_format\": True, \"skills\": True, \"style\": False, \"initialization\": True}"},
+    ] + messages
+
+    for i in range(5):
+        ## Verify if the format of the modules is correct
+        try:
+            ## Regenerate on each attempt so a malformed reply can actually be retried
+            response = generator.generate_response(messages).replace("```", "").replace("\n", "").replace("json", "").replace(" ", "").replace("True", "true").replace("False", "false")
+            print(response)
+            modules = json.loads(response)
+            ## Check if there are missing modules or extra modules
+            for key in ["background", "command", "suggestion", "goal", "examples", "constraints", "workflow", "output_format", "skills", "style", "initialization"]:
+                if key not in modules:
+                    modules[key] = False
+                    pass
+                pass
+            extra_keys = []
+            for key in modules.keys():
+                if key not in ["background", "command", "suggestion", "goal", "examples", "constraints", "workflow", "output_format", "skills", "style", "initialization"]:
+                    extra_keys.append(key)
+                    pass
+                pass
+            for key in extra_keys:
+                del modules[key]
+                pass
+            return modules
+        except Exception as e:
+            print(e)
+            continue
+        pass
+    ## Return the default modules if the format is incorrect
+    return default_modules
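
The sanitisation chain exists because chat models often wrap JSON in markdown fences and emit Python-style booleans. A worked example of what it turns a typical reply into (the reply text is illustrative):

    raw = '```json\n{"background": True, "goal": True}\n```'
    cleaned = (raw.replace("```", "").replace("\n", "").replace("json", "")
                  .replace(" ", "").replace("True", "true").replace("False", "false"))
    print(cleaned)  # {"background":true,"goal":true} -- now valid input for json.loads
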
modules/goal.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the goal module needed for the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_goal(generator,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,确定任务的目标,以无序列表的格式输出,不要输出任何交互信息。请注意,目标应当尽量简单,在任务没有明显的多种行为的情况下,通常为1条,一般不超过2条。例如,当用户需要LLM计算方程的解时,目标信息可能为:\n- 计算出正确的方程解"},
+    ] + messages
+    response = generator.generate_response(messages)
+    return response
modules/initialization.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the initialization module needed for the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_initialization(generator,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,确定任务的初始化部分,即任务开始前和用户的一句话交流问候。例如,当用户需要LLM提供营养规划时,初始化信息可能为:\n作为一名营养规划师,我将根据您的情况给出营养建议。"},
+    ] + messages
+    response = generator.generate_response(messages)
+    return response
modules/output_format.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the output_format module needed for the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_output_format(generator,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,确定任务的输出格式,不要输出任何交互信息。请注意,输出格式应当尽量简单,优先考虑json、xml等标准格式,如果标准格式不适用于这个任务,用一句话简要描述规定的格式,不要包含过多的细节。例如,当用户需要LLM计算方程的解时,输出格式信息可能为:\n- 输出仅为一个数字,表示方程的解"},
+    ] + messages
+    response = generator.generate_response(messages)
+    return response
modules/skills.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the skills module needed for the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_skills(generator,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,确定任务所需的技能,以无序列表的格式输出。请注意,技能描述应当尽量简单,不要包含过多的细节。例如,当用户需要LLM点评时事热点时,所需技能可能为:\n- 对各类社会热点事件了如指掌,能快速把握事件的来龙去脉\n- 善于从多角度分析事件,给出独特犀利的评论观点"},
+    ] + messages
+    response = generator.generate_response(messages)
+    return response
modules/suggestion.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the suggestion module of the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_suggestion(client,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,生成任务的建议信息,以无序列表的格式输出,不要输出任何交互信息。例如,当用户需要LLM玩谁是卧底游戏时,建议信息可能为:\n- 当无法确定你的阵营时,你的描述应该尽量模糊"},
+    ] + messages
+    response = client.generate_response(messages)
+    return response
modules/workflow.py
ADDED
@@ -0,0 +1,13 @@
+# Generate the workflow module needed for the task
+
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+def gen_workflow(generator,messages):
+    messages=[
+        {"role": "system", "content": "你需要分析用户给出的任务,拆解执行这个任务需要完成的工作流程,以有序列表的格式输出,序号表示先后顺序,不要输出任何交互信息。对于需要判断的分支流程,可以用类似“1.1”和“1.2”这样的次级流程表示;对于需要迭代的循环流程,可以用“跳转至第2步”或“重复上一步”之类的流程表示。例如,当用户需要LLM求解一元二次方程时,工作流程信息可能为:\n1. 将方程化为标准形式,确定方程中的系数a、b和c。\n2. 计算方程的判别式,分析方程根的情况\n2.1 判别式>0,方程有两个实数根;\n2.2 判别式=0,方程有一个实数根;\n2.3 判别式<0,方程没有实数根\n3. 根据解的情况给出解的形式\n4. 求解方程的根\n5. 检验解的正确性"},
+    ] + messages
+    response = generator.generate_response(messages)
+    return response
showcases/generate.py
ADDED
@@ -0,0 +1,177 @@
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+
+import streamlit as st
+from modules.get_modules import get_modules
+from modules.background import gen_background
+from modules.command import gen_command
+from modules.constraints import gen_constraints
+from modules.goal import gen_goal
+from modules.initialization import gen_initialization
+from modules.output_format import gen_output_format
+from modules.skills import gen_skills
+from modules.suggestion import gen_suggestion
+from modules.workflow import gen_workflow
+
+module_name_dict = {
+    "background": "背景",
+    "command": "命令",
+    "suggestion": "建议",
+    "goal": "目标",
+    "examples": "任务样例",
+    "constraints": "约束",
+    "workflow": "工作流程",
+    "output_format": "输出格式",
+    "skills": "技能",
+    "style": "风格",
+    "initialization": "初始化"
+}
+
+module_func_dict = {
+    "background": gen_background,
+    "command": gen_command,
+    "suggestion": gen_suggestion,
+    "goal": gen_goal,
+    "examples": None,
+    "constraints": gen_constraints,
+    "workflow": gen_workflow,
+    "output_format": gen_output_format,
+    "skills": gen_skills,
+    "style": None,
+    "initialization": gen_initialization
+}
+
+## The page to generate the LangGPT prompt
+def generate():
+    state = st.session_state
+    ## A text input for the user to input the basic description of the task
+    col1, col2 = st.columns([8, 2])
+    with col1:
+        task = st.text_input("任务描述","撰写科幻小说",label_visibility="collapsed")
+        pass
+    ## A button to analyze the task and generate the modules
+    with col2:
+        if st.button("分析任务",type="primary"):
+            ## Get the modules
+            state.module_messages = [{"role": "user", "content": f"我希望LLM帮我执行的任务是:{task}"}]
+            state.modules = get_modules(state.generator, state.module_messages)
+            pass
+    with st.sidebar:
+        st.subheader("基本信息")
+        state.role_name = st.text_input("助手名称","",help="例如:大模型、助手等")
+        state.author = st.text_input("作者","LangGPT")
+        state.version = st.number_input("版本",min_value=0.1,value=0.1,step=0.1)
+        state.description = st.text_area("描述","这是一个LangGPT生成的助手",height=100)
+        st.subheader("模块控制")
+        if "modules" not in state:
+            state.modules = {
+                "background": False,
+                "command": False,
+                "suggestion": False,
+                "goal": False,
+                "examples": False,
+                "constraints": False,
+                "workflow": False,
+                "output_format": False,
+                "skills": False,
+                "style": False,
+                "initialization": False
+            }
+        ## Some toggles to show the modules
+        if "on_modules" not in state:
+            state.on_modules = {}
+            pass
+        for key in state.modules.keys():
+            if key in module_name_dict:
+                state.on_modules[key] = st.toggle(module_name_dict[key],state.modules[key])
+                pass
+            pass
+        pass
+    if "modules" in state:
+        if state.on_modules["examples"]:
+            st.subheader("请提供任务样例:")
+            input_area, output_area = st.columns(2)
+            with input_area:
+                input_example = st.text_area("样例输入","")
+                pass
+            with output_area:
+                output_example = st.text_area("样例输出","")
+                pass
+            state.examples = {
+                "input": input_example,
+                "output": output_example
+            }
+            pass
+        if state.on_modules["style"]:
+            st.subheader("请指定回复的风格:")
+            style = st.text_input("风格","",help="例如:正式、幽默、严肃等",label_visibility="collapsed")
+            state.style = style
+            pass
+        ## A button to control the generation of the modules
+        for key in state.modules.keys():
+            if key in state:
+                if state.on_modules[key]:
+                    with st.expander(module_name_dict[key]):
+                        st.text_area(module_name_dict[key],state[key],label_visibility="collapsed")
+                        pass
+                    pass
+        g,c = st.columns([1,1])
+        with g:
+            generate_button = st.button("生成模块")
+            pass
+        with c:
+            compose_button = st.button("合成提示")
+            pass
+        if generate_button:
+            for key in state.modules.keys():
+                if key == "examples" or key == "style":
+                    continue
+                else:
+                    if state.on_modules[key]:
+                        if key not in state:
+                            state[key] = module_func_dict[key](state.generator,state.module_messages)
+                            pass
+                        pass
+            st.rerun()
+            pass
+        if compose_button:
+            ## Reset the prompt so repeated clicks do not append duplicate sections
+            state.prompt = ""
+            pass
+            if state.role_name:
+                state.prompt += f"# Role: {state.role_name}\n"
+                pass
+            state.prompt += f"## profile\n"
+            if state.author:
+                state.prompt += f"- Author: {state.author}\n"
+                pass
+            if state.version:
+                state.prompt += f"- Version: {state.version}\n"
+                pass
+            if state.description:
+                state.prompt += f"- Description: {state.description}\n"
+                pass
+            ## Check if all the checked modules are generated
+            for key in state.modules.keys():
+                if state.on_modules[key]:
+                    if key not in state:
+                        st.error(f"请先生成{module_name_dict[key]}")
+                        return
+                    else:
+                        if key == "examples":
+                            state.prompt += f"## {module_name_dict[key]}\n"
+                            state.prompt += f"### 输入\n"
+                            state.prompt += state.examples["input"]
+                            state.prompt += "\n"
+                            state.prompt += f"### 输出\n"
+                            state.prompt += state.examples["output"] + "\n\n"
+                            continue  ## examples is a dict, so skip the generic append below
+                        state.prompt += f"## {key}\n"
+                        state.prompt += state[key]
+                        state.prompt += "\n\n"
+            state.page = "test"
+            pass
+            st.rerun()
+
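
With the default module set, composing yields a LangGPT-style markdown prompt of roughly this shape (the role name and section bodies are illustrative; section headers use the English keys, as in the code above):

    # Role: 科幻小说作家
    ## profile
    - Author: LangGPT
    - Version: 0.1
    - Description: 这是一个LangGPT生成的助手
    ## background
    - ...
    ## goal
    - ...
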
showcases/test.py
ADDED
@@ -0,0 +1,44 @@
+import sys
+import os
+abs_path = os.getcwd()
+sys.path.append(abs_path) # Adds higher directory to python modules path.
+from models.openai import Generator
+import streamlit as st
+
+def test():
+    state = st.session_state
+    # col1, col2 = st.columns([1, 1])
+    with st.sidebar:
+        st.subheader("LangGPT结构化提示词")
+        prompt = st.text_area("langgpt_prompt",state.prompt,height=500,label_visibility="collapsed")
+        if st.button("保存提示词"):
+            if "test_messages" not in state:
+                state.test_messages = []
+                pass
+            # state.test_messages = [{"role": "system", "content": prompt}]
+            state.prompt = prompt
+            st.rerun()
+            pass
+        pass
+    ## A Chatbot to display the messages
+    if "test_messages" not in state:
+        state.test_messages = [{"role": "system", "content": state.prompt}]
+        response = state.generator.generate_response(state.test_messages)
+        state.test_messages.append({"role": "assistant", "content": response})
+        st.rerun()
+        pass
+    # st.subheader("LangGPT对话")
+    for message in state.test_messages:
+        if message["role"] == "system":
+            continue
+        st.chat_message(message["role"]).write(message["content"])
+        pass
+
+    if user_input := st.chat_input("输入对话"):
+        state.test_messages.append({"role": "user", "content": user_input})
+        response = state.generator.generate_response(state.test_messages)
+        state.test_messages.append({"role": "assistant", "content": response})
+        st.rerun()
+        pass
+    pass
+
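
The test page keeps the whole exchange in state.test_messages in the standard chat-completions shape, with the composed prompt riding along as the system message. After one user turn the list looks like this (contents illustrative):

    test_messages = [
        {"role": "system", "content": state.prompt},      # the composed LangGPT prompt
        {"role": "assistant", "content": "作为一名..."},   # bootstrap reply generated on first load
        {"role": "user", "content": "你好"},
        {"role": "assistant", "content": "..."},
    ]
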