fix bugs
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__
+logs
agentverse/agents/simulation_agent/conversation.py
CHANGED
@@ -66,7 +66,7 @@ class ConversationAgent(BaseAgent):
                 raise
             except Exception as e:
                 logger.error(e)
-                logger.
+                logger.warn("Retrying...")
                 continue
 
         if parsed_response is None:
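For context, after this change the surrounding retry loop behaves roughly as in the sketch below: a parse failure is logged, a retry warning is emitted, and the loop continues until an attempt succeeds or retries run out. This is a minimal illustration, not the exact AgentVerse code; max_retries, generate_response, and output_parser are assumed names.

    # Sketch of the retry behaviour (assumed names, not the real implementation).
    parsed_response = None
    for _ in range(max_retries):
        try:
            response = generate_response(prompt)
            parsed_response = output_parser.parse(response)
            break
        except KeyboardInterrupt:
            raise                       # never swallow user interrupts
        except Exception as e:
            logger.error(e)
            logger.warn("Retrying...")  # the line added in this commit
            continue

    if parsed_response is None:
        logger.error("Failed to parse a response after all retries")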
agentverse/output_parser/output_parser.py
CHANGED
@@ -134,13 +134,14 @@ class NlpClassroom9PlayersParser(OutputParser):
         cleaned_output = re.sub(r"\n+", "\n", cleaned_output)
         cleaned_output = cleaned_output.split("\n")
         if not (
-            len(cleaned_output)
+            len(cleaned_output) >= 2
             and cleaned_output[0].startswith("Action:")
             and cleaned_output[1].startswith("Action Input:")
         ):
             raise OutputParserError(text)
         action = cleaned_output[0][len("Action:") :].strip()
-        action_input = cleaned_output[1][len("Action Input:") :].strip()
+        # action_input = cleaned_output[1][len("Action Input:") :].strip()
+        action_input = "\n".join(cleaned_output[1:]).strip()[len("Action Input:") :].strip()
         if action == "Speak":
             return AgentFinish({"output": action_input}, text)
         elif action == "CallOn":
@@ -216,13 +217,13 @@ class PrisonerDilemmaParser(OutputParser):
         cleaned_output = re.sub(r"\n+", "\n", cleaned_output)
         cleaned_output = cleaned_output.split("\n")
         if not (
-            len(cleaned_output)
+            len(cleaned_output) >= 2
             and cleaned_output[0].startswith("Action:")
             and cleaned_output[1].startswith("Action Input:")
         ):
             raise OutputParserError(text)
         action = cleaned_output[0][len("Action:") :].strip()
-        action_input = cleaned_output[1][len("Action Input:") :].strip()
+        action_input = "\n".join(cleaned_output[1:]).strip()[len("Action Input:") :].strip()
 
         if action == "Speak":
             # make sure the police count the round right
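The parser change has two effects: the length check now tolerates responses with more than two lines, and Action Input is assembled from every line after the first instead of only the second line, so multi-line inputs are no longer truncated. A standalone sketch of the updated extraction logic (the sample response text is made up):

    # Self-contained sketch of the new parsing; the sample response is hypothetical.
    import re

    text = "Action: Speak\nAction Input: Hello everyone,\nlet's start today's discussion."
    cleaned_output = re.sub(r"\n+", "\n", text.strip())
    cleaned_output = cleaned_output.split("\n")

    if not (
        len(cleaned_output) >= 2
        and cleaned_output[0].startswith("Action:")
        and cleaned_output[1].startswith("Action Input:")
    ):
        raise ValueError(text)  # stand-in for OutputParserError

    action = cleaned_output[0][len("Action:") :].strip()
    # Join everything after the first line so multi-line inputs survive,
    # then strip the "Action Input:" prefix.
    action_input = "\n".join(cleaned_output[1:]).strip()[len("Action Input:") :].strip()

    print(action)        # Speak
    print(action_input)  # Hello everyone, / let's start today's discussion.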
agentverse/tasks/simulation/nlp_classroom_9players/config.yaml
CHANGED
@@ -26,7 +26,7 @@ prompts:
       Here is the conversation history
       ${chat_history}
 
-      Remember to pay attention to the response format instructions, and strictly follow the rules specified above!
+      Remember to pay attention to the response format instructions, and strictly follow the rules specified above! Only output ONE action and action input in your response!
       You should give your response based on the above history. What will you, ${agent_name}, do next?
 
   student_prompt: &student_prompt |-
@@ -60,7 +60,7 @@ prompts:
       Here is the conversation history
       ${chat_history}
 
-      Remember to pay attention to the response format instructions, and strictly follow the rules specified above!
+      Remember to pay attention to the response format instructions, and strictly follow the rules specified above! Only output ONE action and action input in your response!
       You should give your response based on the above history. What will you, ${agent_name}, do next?
 
 
@@ -96,7 +96,7 @@ agents:
     prompt_template: *professor_prompt
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 250
     output_parser:
@@ -112,7 +112,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -126,7 +126,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -140,7 +140,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -154,7 +154,7 @@ agents:
     memory_type: chat_history
     llm:
      llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -168,7 +168,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -182,7 +182,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -196,7 +196,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
@@ -210,7 +210,7 @@ agents:
     memory_type: chat_history
     llm:
       llm_type: gpt-4
-      model: "gpt-
+      model: "gpt-4o"
       temperature: 0.7
       max_tokens: 100
     output_parser:
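The prompt edits ask each agent to emit exactly one Action / Action Input pair, which matches the stricter parser above, and every agent's model is pinned to "gpt-4o". The ${chat_history} and ${agent_name} placeholders are filled at runtime; the snippet below is only a rough illustration with string.Template (AgentVerse's own templating may differ, and the example names and history are invented):

    # Hypothetical illustration of how the prompt placeholders get filled.
    from string import Template

    professor_prompt = Template(
        "Here is the conversation history\n"
        "${chat_history}\n\n"
        "Remember to pay attention to the response format instructions, and strictly "
        "follow the rules specified above! Only output ONE action and action input "
        "in your response!\n"
        "You should give your response based on the above history. "
        "What will you, ${agent_name}, do next?"
    )

    print(professor_prompt.substitute(
        chat_history="[Student A]: Action: Listen",
        agent_name="Professor B",
    ))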
agentverse/tasks/simulation/prisoner_dilemma/config.yaml
CHANGED
@@ -53,7 +53,7 @@ agents:
     prompt_template: *prompt
     llm:
       model: "gpt-4"
-      llm_type: gpt-
+      llm_type: gpt-4o
       temperature: 1.2
       max_tokens: 200
     output_parser:
@@ -75,7 +75,7 @@ agents:
     prompt_template: *prompt
     llm:
       model: "gpt-4"
-      llm_type: gpt-
+      llm_type: gpt-4o
       temperature: 1.2
       max_tokens: 100
     output_parser:
@@ -97,7 +97,7 @@ agents:
     prompt_template: *prompt
     llm:
       model: "gpt-4"
-      llm_type: gpt-
+      llm_type: gpt-4o
       temperature: 1.2
       max_tokens: 100
     output_parser:
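The prisoner-dilemma agents keep model: "gpt-4" but now declare llm_type: gpt-4o. How AgentVerse combines the two fields is not visible in this diff; the sketch below only illustrates, under assumed wiring, how such an llm block's sampling settings would map onto a chat completion call with the legacy openai 0.x SDK (the same SDK style app.py uses below).

    # Assumed wiring, for illustration only: llm_type is treated as the model
    # string sent to the API, and temperature/max_tokens are passed through.
    import openai

    response = openai.ChatCompletion.create(
        model="gpt-4o",
        temperature=1.2,   # the prisoner-dilemma agents use a high temperature
        max_tokens=200,
        messages=[{"role": "user", "content": "<rendered prompt_template>"}],
    )
    print(response["choices"][0]["message"]["content"])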
app.py
CHANGED
@@ -130,10 +130,10 @@ class GUI:
             gr.Box.update(visible=any(self.solution_status)),
         )
 
-    def delay_reset(self, task_dropdown, api_key_text, organization_text):
+    def delay_reset(self, task_dropdown, api_key_text, organization_text, api_base_text):
         self.autoplay = False
         self.image_now, self.text_now = self.reset(
-            task_dropdown, api_key_text, organization_text
+            task_dropdown, api_key_text, organization_text, api_base_text
         )
         return (
             self.image_now,
@@ -150,9 +150,11 @@ class GUI:
         task_dropdown="simulation/nlp_classroom_9players",
         api_key_text="",
         organization_text="",
+        api_base_text=""
     ):
         openai.api_key = api_key_text
         openai.organization = organization_text
+        openai.api_base = api_base_text if api_base_text else None
         """
         tell backend the new number of students and generate new empty image
         :param stu_num:
@@ -472,6 +474,7 @@ class GUI:
         )
         api_key_text = gr.Textbox(label="OPENAI API KEY")
         organization_text = gr.Textbox(label="Organization")
+        api_base_text = gr.Textbox(label="OpenAI Base URL", default="", placeholder="if not set, will use openai's default url")
         with gr.Row():
             with gr.Column():
                 image_output = gr.Image()
@@ -550,7 +553,7 @@ class GUI:
         # reset_btn.click(fn=self.reset, inputs=None, outputs=[image_output, text_output], show_progress=False)
         reset_btn.click(
             fn=self.delay_reset,
-            inputs=[task_dropdown, api_key_text, organization_text],
+            inputs=[task_dropdown, api_key_text, organization_text, api_base_text],
             outputs=[
                 image_output,
                 text_output,
|