peichao.dong committed
Commit 87d8ff9 · Parent(s): eede2d8

add models to cantrol model version

Files changed:
- agents/code_generate_agent.py +2 -3
- agents/tools/api_layer_code_tool.py +3 -2
- agents/tools/domain_layer_code_tool.py +4 -3
- agents/tools/persistent_layer_code_tool.py +3 -2
- agents/tools/python_code_tool.py +31 -3
- app.py +0 -5
- chains.py +3 -4
- documents/bussiness_context/business_context.md +8 -1
- embedding.py +3 -2
- models.py +10 -0
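The thread running through these diffs: every module that previously constructed its chat model inline now imports a shared llm() factory from the new models.py, so the model version is controlled in one place. A minimal before/after sketch of the pattern (the "before" line is an assumption reconstructed from the removed ChatOpenAI imports, since the original right-hand sides are truncated in this view; PROMPT is a placeholder, not from the repo):

from langchain import LLMChain, PromptTemplate

from models import llm  # the factory added in this commit

PROMPT = PromptTemplate(input_variables=["input"], template="Echo: {input}")

# Before (assumed): each module built its own model inline.
#   from langchain.chat_models import ChatOpenAI
#   chain = LLMChain(llm=ChatOpenAI(temperature=0.1), prompt=PROMPT)

# After: model construction goes through the shared factory.
chain = LLMChain(llm=llm(temperature=0.1), prompt=PROMPT)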
agents/code_generate_agent.py
CHANGED
@@ -1,8 +1,6 @@
 import re
 from typing import List, Union
 from langchain.chains import LLMChain
-from langchain.chat_models import ChatOpenAI
-from langchain.llms import OpenAI
 from langchain.agents import Tool, LLMSingleActionAgent, AgentExecutor, AgentOutputParser
 from langchain.schema import AgentAction, AgentFinish
 from langchain.agents import initialize_agent
@@ -11,6 +9,7 @@ from agents.promopts import code_generate_agent_template
 from agents.tools.api_layer_code_tool import apiLayerCodeGenerator
 from agents.tools.domain_layer_code_tool import domainLayerCodeGenerator, entityCodeGenerator
 from agents.tools.persistent_layer_code_tool import persistentLayerCodeGenerator
+from models import llm


 class CustomPromptTemplate(StringPromptTemplate):
@@ -79,7 +78,7 @@ def code_agent_executor() -> AgentExecutor:
         input_variables=["input", "intermediate_steps"]
     )

-    code_llm_chain = LLMChain(llm=
+    code_llm_chain = LLMChain(llm=llm(temperature=0.7), prompt=AGENT_PROMPT)

     tool_names = [tool.name for tool in code_agent_tools]
     code_agent = LLMSingleActionAgent(
agents/tools/api_layer_code_tool.py
CHANGED
@@ -1,7 +1,8 @@
 from langchain import LLMChain, PromptTemplate
-from langchain.chat_models import ChatOpenAI
 from langchain.agents import tool

+from models import llm
+

 API_LAYER = """You are a software developer. Your task is to generate the api layer tests and product code.

@@ -85,7 +86,7 @@ request: {input}"""
 API_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=API_LAYER,)


-apiChain = LLMChain(llm =
+apiChain = LLMChain(llm = llm(temperature=0.1), prompt=API_LAYER_PROMPT)


 @tool("Generate API Layer Code", return_direct=True)
agents/tools/domain_layer_code_tool.py
CHANGED
@@ -1,7 +1,8 @@
 from langchain import LLMChain, PromptTemplate
-from langchain.chat_models import ChatOpenAI
 from langchain.agents import tool

+from models import llm
+

 DOMAIN_LAYER = """You are a software developer. Your task is to generate the domain layer tests and product code.

@@ -77,7 +78,7 @@ request: {input}"""

 DOMAIN_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=DOMAIN_LAYER,)

-domainLayerChain = LLMChain(llm =
+domainLayerChain = LLMChain(llm = llm(temperature=0.1), prompt=DOMAIN_LAYER_PROMPT)


 @tool("Generate Domain Layer Code", return_direct=True)
@@ -188,7 +189,7 @@ request: {input}"""

 ENTITY_PROMPT = PromptTemplate(input_variables=["input"], template=ENTITY,)

-entityChain = LLMChain(llm =
+entityChain = LLMChain(llm = llm(temperature=0.1), prompt=ENTITY_PROMPT)


 @tool("Generate Entity Code", return_direct=True)
agents/tools/persistent_layer_code_tool.py
CHANGED
@@ -1,7 +1,8 @@
 from langchain import LLMChain, PromptTemplate
-from langchain.chat_models import ChatOpenAI
 from langchain.agents import tool

+from models import llm
+

 PERSISTENT_LAYER = """You are a software developer. Your task is to generate the persistent layer tests and product code.

@@ -162,7 +163,7 @@ request: {input}"""

 PERSISTENT_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=PERSISTENT_LAYER,)

-persistentChain = LLMChain(llm =
+persistentChain = LLMChain(llm = llm(temperature=0.1), prompt=PERSISTENT_LAYER_PROMPT)


 @tool("Generate Persistent Layer Code", return_direct=True)
agents/tools/python_code_tool.py
CHANGED
@@ -3,8 +3,36 @@ from langchain import LLMChain, PromptTemplate
 from langchain.chat_models import ChatOpenAI
 from langchain.llms import OpenAI
 from langchain.agents import tool
-from langchain.utilities import PythonREPL
+# from langchain.utilities import PythonREPL

+import sys
+from io import StringIO
+from typing import Dict, Optional
+
+from pydantic import BaseModel, Field
+
+from models import llm
+
+
+class PythonREPL(BaseModel):
+    """Simulates a standalone Python REPL."""
+
+    # globals: Optional[Dict] = Field(default_factory=dict, alias="_globals")
+    # locals: Optional[Dict] = Field(default_factory=dict, alias="_locals")
+
+    def run(self, command: str) -> str:
+        """Run command with own globals/locals and returns anything printed."""
+        old_stdout = sys.stdout
+        sys.stdout = mystdout = StringIO()
+        try:
+            exec(command, globals(), locals())
+            sys.stdout = old_stdout
+            output = mystdout.getvalue()
+        except Exception as e:
+            sys.stdout = old_stdout
+            output = str(e)
+        return output
+

 generate_python_code = """
 Please write Python script to fulfill the following requirement:
@@ -18,7 +46,7 @@ Only output the code section with code block.

 generate_python_code_promopt = PromptTemplate(input_variables=["input"], template=generate_python_code,)

-generate_code_chain = LLMChain(llm =
+generate_code_chain = LLMChain(llm = llm(temperature=0.1), prompt=generate_python_code_promopt, output_key="code")


 @tool("Generate and Excute Python Code ", return_direct=True)
@@ -37,7 +65,7 @@ def generate_and_excute_python_code(input: str) -> str:
     code_content = answer_code[start:end].strip()

     print(code_content)
-    python_repl = PythonREPL()
+    python_repl = PythonREPL(globals={"__name__": "__main__"}, locals={})
     result = python_repl.run(code_content)
     return f"""
 code:
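The vendored PythonREPL replaces the langchain.utilities import with a local copy: run() swaps sys.stdout for a StringIO buffer, exec()s the snippet, and returns whatever was printed, or the exception text on failure. Note that because the globals/locals field declarations are commented out, the globals={...}, locals={} constructor arguments above are accepted but silently dropped under pydantic v1's default extra-ignore config, and exec() runs against the module's own namespaces rather than per-instance ones. A minimal smoke test, assuming this commit's module is importable:

from agents.tools.python_code_tool import PythonREPL

repl = PythonREPL()
assert repl.run("print(1 + 1)") == "2\n"        # printed output is captured and returned
assert "division by zero" in repl.run("1 / 0")  # exceptions come back as their message text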
app.py
CHANGED
@@ -1,16 +1,11 @@
 import gradio as gr
-from langchain.chains import LLMChain
-from langchain.chat_models import ChatOpenAI
 from langchain.document_loaders import TextLoader
 from agents.tools.python_code_tool import generate_and_excute_python_code
 from chains import HumanFeedBackChain, contextRewriteChain
 from embedding import CustomEmbedding
 from memories import HumenFeedbackBufferMemory
-from langchain.memory import ConversationBufferMemory
-from promopts import FEEDBACK, FEEDBACK_PROMPT
 from agents.code_generate_agent import code_agent_executor, code_agent_tools

-# llm = ChatOpenAI(temperature=0.7)

 baMemory = HumenFeedbackBufferMemory(
     input_key="input", human_prefix="Answer", ai_prefix="AI")
chains.py
CHANGED
@@ -1,10 +1,9 @@
 from typing import Any, Optional
 from langchain.chains import LLMChain
 from langchain.base_language import BaseLanguageModel
-from langchain.schema import LLMResult, PromptValue
 from langchain.prompts import PromptTemplate
 from langchain.memory.chat_memory import BaseMemory
-from
+from models import llm

 from promopts import CONTENT_RE_WRIGHT_PROMPT, FEEDBACK_PROMPT

@@ -14,7 +13,7 @@ class HumanFeedBackChain(LLMChain):

     memory: Optional[BaseMemory] = None

-    def __init__(self, verbose=True, llm: BaseLanguageModel =
+    def __init__(self, verbose=True, llm: BaseLanguageModel = llm(temperature=0.7), memory: Optional[BaseMemory] = None, prompt: PromptTemplate = FEEDBACK_PROMPT):
         super().__init__(llm=llm, prompt=prompt, memory=memory, verbose=verbose)

     def run(self, *args: Any, **kwargs: Any) -> str:
@@ -40,4 +39,4 @@ class HumanFeedBackChain(LLMChain):
 )


-contextRewriteChain = LLMChain(llm=
+contextRewriteChain = LLMChain(llm=llm(temperature=0.7), prompt=CONTENT_RE_WRIGHT_PROMPT)
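One subtlety in the new signature def __init__(self, verbose=True, llm: BaseLanguageModel = llm(temperature=0.7), ...): Python evaluates default argument expressions once, at definition time, so every HumanFeedBackChain that does not pass its own llm shares a single model instance created at import. A tiny, self-contained illustration of that evaluation rule (hypothetical stand-ins, not from the repo):

calls = 0

def make_model():
    # Stand-in for llm(temperature=0.7); counts how many times it runs.
    global calls
    calls += 1
    return object()

def build_chain(model=make_model()):  # default evaluated once, right here
    return model

a = build_chain()
b = build_chain()
assert a is b and calls == 1  # both "chains" share the one default instance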
documents/bussiness_context/business_context.md
CHANGED
@@ -6,4 +6,11 @@ FeatureConfig is used to configure the items in a Feature that control the front-end display behavior

 The main purpose of adding a FeatureConfig is to control a specific behavior of the FeatureConfig consumer. When adding a FeatureConfig, it should include featureKey, data, saData, status, a title, and a description. A newly added FeatureConfig has DRAFT status.

-Client users need to view the data, saData, update time, and id of a FeatureConfig. A FeatureConfig can also be associated with audience-targeting rules, and configs matching those rules can be shown to client users. Client users can only view PUBLISHED data that matches the targeting rules. Targeting rules include uploaded user whitelists, percentage-based gray release, geographic location, and audience tags.
+Client users need to view the data, saData, update time, and id of a FeatureConfig. A FeatureConfig can also be associated with audience-targeting rules, and configs matching those rules can be shown to client users. Client users can only view PUBLISHED data that matches the targeting rules. Targeting rules include uploaded user whitelists, percentage-based gray release, geographic location, and audience tags.
+
+When adding a FeatureConfig, it should include featureKey, data, saData, status, a title, and a description. A newly added FeatureConfig has DRAFT status.
+
+Client users fetch the FeatureConfig from the server-side API through the client, and the client uses the FeatureConfig to control how the related Feature is displayed.
+
+The user-whitelist targeting rule requires uploading a whitelist of user ids; only users on the whitelist can receive the related feature.
+The geographic-location rule requires the configuration side to set a list of location codes for the targeted regions; the client passes a location-code parameter when requesting the API, and data whose location code matches is visible to the user.
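For readers skimming the context document, the entity it describes could be summarized as follows — a hypothetical Python sketch, since the repo's generated code, not this markdown file, defines the real model:

from dataclasses import dataclass, field
from enum import Enum


class Status(Enum):
    DRAFT = "DRAFT"          # state of every newly added config
    PUBLISHED = "PUBLISHED"  # only PUBLISHED configs reach client users


@dataclass
class FeatureConfig:
    # Field names follow the document; types are illustrative assumptions.
    feature_key: str
    data: dict
    sa_data: dict
    title: str
    description: str
    status: Status = Status.DRAFT
    targeting_rules: list = field(default_factory=list)  # whitelist, gray release, geo, tags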
embedding.py
CHANGED
@@ -7,10 +7,11 @@ from langchain.chains import RetrievalQA
 from langchain.chains.question_answering import load_qa_chain

 from langchain.document_loaders import NotionDirectoryLoader
-from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain

+from models import llm
+

 class CustomEmbedding:
     notionDirectoryLoader = NotionDirectoryLoader(
@@ -32,7 +33,7 @@ class CustomEmbedding:



-    def getFAQChain(self, llm=
+    def getFAQChain(self, llm=llm(temperature=0.7)):
         memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
         docsearch = FAISS.load_local(
             "./documents/business_context.faiss", self.embeddings)
models.py
ADDED
@@ -0,0 +1,10 @@
+from langchain.chat_models import ChatOpenAI
+from langchain.base_language import BaseLanguageModel
+
+def llm(temperature=0) -> BaseLanguageModel:
+    # gpt-3.5
+    return ChatOpenAI(temperature=temperature)
+    # gpt-4
+    # return ChatOpenAI(temperature=temperature, model_name="gpt-4")
+
+
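models.py is now the single switch for the model version: uncommenting the gpt-4 line flips every chain and agent at once. If you wanted to change versions without editing code, one possible extension (an assumption, not part of this commit) is to read the model name from the environment:

import os

from langchain.base_language import BaseLanguageModel
from langchain.chat_models import ChatOpenAI


def llm(temperature: float = 0) -> BaseLanguageModel:
    # Hypothetical variant: choose the model via LLM_MODEL_NAME, keeping
    # the commit's gpt-3.5 behavior as the default.
    model_name = os.environ.get("LLM_MODEL_NAME", "gpt-3.5-turbo")
    return ChatOpenAI(temperature=temperature, model_name=model_name)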