jpfearnworks commited on
Commit
040f332
1 Parent(s): 6355201

Add gradio interface

Browse files
Dockerfile CHANGED
@@ -13,5 +13,6 @@ WORKDIR /app/src
13
  ENV PATH="/root/.local/bin:${PATH}"
14
 
15
  EXPOSE 8501
 
16
 
17
- CMD ["streamlit", "run", "main.py", ]
 
13
  ENV PATH="/root/.local/bin:${PATH}"
14
 
15
  EXPOSE 8501
16
+ EXPOSE 7000
17
 
18
+ CMD python main.py
README.MD CHANGED
@@ -27,4 +27,17 @@ Coming Soon
27
 
28
  ### Deployment
29
 
30
- Project is packaged with a dockerfile and docker-compose job that should expose a streamlit ui on localhost:8501. Please use the envtemplate file to create your .env for running the project
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  ### Deployment
29
 
30
+ Project is packaged with a dockerfile that exposes a gradio ui on localhost:7000. Please use the envtemplate file to create your .env for running the project
31
+
32
+ To build & run
33
+
34
+ ```
35
+ docker build . -t ai_agent:latest
36
+ docker run -it -p 7000:7000 ai_agent:latest
37
+ ```
38
+
39
+ To build, run, and clean up the image in one command:
40
+
41
+ ```
42
+ docker build . -t ai_agent:latest && docker run --rm -it -p 7000:7000 --name ai_agent_container ai_agent:latest
43
+ ```
docker-compose.yml DELETED
@@ -1,10 +0,0 @@
1
- version: '3.8'
2
-
3
- services:
4
- app:
5
- build: .
6
- ports:
7
- - "8501:8501"
8
- volumes:
9
- - ./src:/app/src
10
- command: streamlit run main.py
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,5 +1,7 @@
1
  langchain
2
  streamlit
 
3
  python-dotenv
4
  openai
5
- wikipedia
 
 
1
  langchain
2
  streamlit
3
+ gradio
4
  python-dotenv
5
  openai
6
+ wikipedia
7
+ ipykernel
sandbox.ipynb ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stderr",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "/home/jphillips/ai_agents/.venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
13
+ " from .autonotebook import tqdm as notebook_tqdm\n"
14
+ ]
15
+ },
16
+ {
17
+ "name": "stdout",
18
+ "output_type": "stream",
19
+ "text": [
20
+ "Running on local URL: http://127.0.0.1:7860\n",
21
+ "\n",
22
+ "To create a public link, set `share=True` in `launch()`.\n"
23
+ ]
24
+ },
25
+ {
26
+ "data": {
27
+ "text/html": [
28
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
29
+ ],
30
+ "text/plain": [
31
+ "<IPython.core.display.HTML object>"
32
+ ]
33
+ },
34
+ "metadata": {},
35
+ "output_type": "display_data"
36
+ },
37
+ {
38
+ "data": {
39
+ "text/plain": []
40
+ },
41
+ "execution_count": 1,
42
+ "metadata": {},
43
+ "output_type": "execute_result"
44
+ }
45
+ ],
46
+ "source": [
47
+ "import gradio as gr\n",
48
+ "\n",
49
+ "def greet(name):\n",
50
+ " return \"Hello \" + name + \"!\"\n",
51
+ "\n",
52
+ "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n",
53
+ "\n",
54
+ "demo.launch() "
55
+ ]
56
+ }
57
+ ],
58
+ "metadata": {
59
+ "kernelspec": {
60
+ "display_name": ".venv",
61
+ "language": "python",
62
+ "name": "python3"
63
+ },
64
+ "language_info": {
65
+ "codemirror_mode": {
66
+ "name": "ipython",
67
+ "version": 3
68
+ },
69
+ "file_extension": ".py",
70
+ "mimetype": "text/x-python",
71
+ "name": "python",
72
+ "nbconvert_exporter": "python",
73
+ "pygments_lexer": "ipython3",
74
+ "version": "3.10.6"
75
+ },
76
+ "orig_nbformat": 4
77
+ },
78
+ "nbformat": 4,
79
+ "nbformat_minor": 2
80
+ }
src/main.py CHANGED
@@ -1,30 +1,26 @@
1
  from dotenv import load_dotenv, find_dotenv
2
  import os
3
- import streamlit as st
4
- from reasoning import ReasoningRouter, default_reasoning_router_config
5
  load_dotenv(find_dotenv())
6
 
 
7
 
8
- def run_app():
9
- """
10
- Runs the Streamlit application.
11
 
12
- Returns:
13
- None
14
- """
15
- openai_api_key = os.getenv("OPENAI_API_KEY")
 
 
16
 
17
- col1, col2 = st.columns([1, 3])
18
- with col1:
19
- st.text("AI Agents Sandbox")
20
- with col2:
21
- st.title("Prompt Strategy Demo")
22
- question = st.text_area('Enter your question here:', height=200)
23
- config = default_reasoning_router_config()
24
- if question:
25
- determiner = ReasoningRouter(api_key=openai_api_key, config=config, question=question,display=st.write)
26
- determiner.determine_and_execute()
27
 
28
  if __name__ == "__main__":
29
- run_app()
30
-
 
1
  from dotenv import load_dotenv, find_dotenv
2
  import os
3
+ import gradio as gr
4
+ from reasoning import ReasoningRouter, get_reasoning_router_config
5
  load_dotenv(find_dotenv())
6
 
7
+ openai_api_key = os.getenv("OPENAI_API_KEY")
8
 
 
 
 
9
 
10
+ def determine_and_execute(question, temperature):
11
+ config = get_reasoning_router_config(temperature=temperature)
12
+ config.temperature = temperature
13
+ determiner = ReasoningRouter(api_key=openai_api_key, config=config, question=question, display=print)
14
+ determine_output, execute_output = determiner.determine_and_execute()
15
+ return determine_output, execute_output
16
 
17
+ iface = gr.Interface(
18
+ fn=determine_and_execute,
19
+ inputs=[gr.components.Textbox(label="Enter your question here:"), gr.components.Slider(minimum=0, maximum=2, default=.7, label="Temperature")],
20
+ outputs=[gr.components.Textbox(label="Reasoning Strategy"), gr.components.Textbox(label="Reasoning")],
21
+ title="Prompt Strategy Demo",
22
+ description="AI Agents Sandbox"
23
+ )
 
 
 
24
 
25
  if __name__ == "__main__":
26
+ iface.launch(server_name="0.0.0.0", server_port=7000)
 
src/reasoning/__init__.py CHANGED
@@ -1,5 +1,5 @@
1
- from .react import ReactStrategy, default_react_config
2
- from .tree_of_thought import TreeOfThoughtStrategy, default_tot_config
3
- from .chain_of_thought import ChainOfThoughtStrategy, default_cot_confg
4
- from .reasoning_router import ReasoningRouter, default_reasoning_router_config
5
- from .reasoning_strategy import ReasoningStrategy, ReasoningConfig, default_reasoning_config
 
1
+ from .react import ReactStrategy, get_react_config
2
+ from .tree_of_thought import TreeOfThoughtStrategy, get_tot_config
3
+ from .chain_of_thought import ChainOfThoughtStrategy, get_cot_confg
4
+ from .reasoning_router import ReasoningRouter, get_reasoning_router_config
5
+ from .reasoning_strategy import ReasoningStrategy, ReasoningConfig, get_reasoning_config
src/reasoning/chain_of_thought.py CHANGED
@@ -2,10 +2,13 @@ from langchain import PromptTemplate, LLMChain
2
  import streamlit as st
3
  from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
4
  from typing import Callable
 
5
 
6
  class ChainOfThoughtStrategy(ReasoningStrategy):
7
  def __init__(self, config: ReasoningConfig, display: Callable):
8
  super().__init__(config=config, display=display)
 
 
9
 
10
  def run(self, question):
11
  print('Using Chain of Thought')
@@ -21,11 +24,12 @@ class ChainOfThoughtStrategy(ReasoningStrategy):
21
  response_cot = llm_chain.run(question)
22
  print(response_cot)
23
  self.display(response_cot)
 
24
 
25
- def default_cot_confg():
26
  usage = """
27
  This problem is simple and the solution may be obtained by focusing on generating a coherent series
28
  of reasoning steps that lead to the final answer. The approach provides interpretability, decomposes
29
  multi-step problems into intermediate steps, and allows for additional computation allocation
30
  """
31
- return ReasoningConfig(usage=usage)
 
2
  import streamlit as st
3
  from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
4
  from typing import Callable
5
+ import pprint
6
 
7
  class ChainOfThoughtStrategy(ReasoningStrategy):
8
  def __init__(self, config: ReasoningConfig, display: Callable):
9
  super().__init__(config=config, display=display)
10
+ print("Creating Reasoning Router with config: ")
11
+ pprint.pprint(vars(config))
12
 
13
  def run(self, question):
14
  print('Using Chain of Thought')
 
24
  response_cot = llm_chain.run(question)
25
  print(response_cot)
26
  self.display(response_cot)
27
+ return response_cot
28
 
29
+ def get_cot_confg(temperature: float = 0.7) -> ReasoningConfig:
30
  usage = """
31
  This problem is simple and the solution may be obtained by focusing on generating a coherent series
32
  of reasoning steps that lead to the final answer. The approach provides interpretability, decomposes
33
  multi-step problems into intermediate steps, and allows for additional computation allocation
34
  """
35
+ return ReasoningConfig(usage=usage, temperature=temperature)
src/reasoning/react.py CHANGED
@@ -3,12 +3,14 @@ from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
3
  from langchain.docstore.wikipedia import Wikipedia
4
  from langchain.agents import initialize_agent, Tool, AgentExecutor
5
  from langchain.agents.react.base import DocstoreExplorer
6
- from typing import Callable
7
-
8
 
9
  class ReactStrategy(ReasoningStrategy):
10
  def __init__(self, config: ReasoningConfig, display: Callable):
11
  super().__init__(config=config, display=display)
 
 
12
 
13
  def run(self, question) -> str:
14
  print('Using ReAct')
@@ -38,7 +40,7 @@ class ReactStrategy(ReasoningStrategy):
38
  self.display(response_react)
39
  return response_react
40
 
41
- def default_react_config() -> ReasoningConfig:
42
  usage = """
43
  The solution for this problem requires searching for further information online,
44
  generating reasoning traces and task-specific actions in an interleaved manner.
@@ -47,4 +49,5 @@ def default_react_config() -> ReasoningConfig:
47
  maintain, and adjust high-level plans for acting, while also interacting with external
48
  sources to incorporate additional information into reasoning
49
  """
50
- return ReasoningConfig(usage=usage)
 
 
3
  from langchain.docstore.wikipedia import Wikipedia
4
  from langchain.agents import initialize_agent, Tool, AgentExecutor
5
  from langchain.agents.react.base import DocstoreExplorer
6
+ from typing import Callable, Optional
7
+ import pprint
8
 
9
  class ReactStrategy(ReasoningStrategy):
10
  def __init__(self, config: ReasoningConfig, display: Callable):
11
  super().__init__(config=config, display=display)
12
+ print("Creating reAct strategy with config: ",)
13
+ pprint.pprint(vars(config))
14
 
15
  def run(self, question) -> str:
16
  print('Using ReAct')
 
40
  self.display(response_react)
41
  return response_react
42
 
43
+ def get_react_config(temperature: float = 0.7) -> ReasoningConfig:
44
  usage = """
45
  The solution for this problem requires searching for further information online,
46
  generating reasoning traces and task-specific actions in an interleaved manner.
 
49
  maintain, and adjust high-level plans for acting, while also interacting with external
50
  sources to incorporate additional information into reasoning
51
  """
52
+ return ReasoningConfig(usage=usage, temperature=temperature)
53
+
src/reasoning/reasoning_router.py CHANGED
@@ -1,9 +1,10 @@
1
  from langchain import PromptTemplate, LLMChain
2
- from .react import ReactStrategy, default_react_config
3
- from .tree_of_thought import TreeOfThoughtStrategy, default_tot_config
4
- from .chain_of_thought import ChainOfThoughtStrategy, default_cot_confg
5
  from .reasoning_strategy import ReasoningConfig
6
  from typing import Tuple, Callable, Optional
 
7
  import re
8
  import os
9
 
@@ -18,15 +19,18 @@ class ReasoningRouter:
18
  Returns:
19
  None
20
  """
 
 
21
  self.api_key = api_key
22
  self.llm = config.llm_class(temperature=config.temperature, max_tokens=config.max_tokens)
23
  self.question: str = question
24
  self.display: Callable = display
25
 
 
26
  self.strategies = {
27
- 1: ReactStrategy(default_react_config(), display=self.display),
28
- 2: TreeOfThoughtStrategy(default_tot_config(),display=self.display),
29
- 3: ChainOfThoughtStrategy(default_cot_confg(),display=self.display)
30
  }
31
  self.usage_block = f"""
32
 
@@ -66,7 +70,8 @@ class ReasoningRouter:
66
  Determines the appropriate reasoning strategy based on the user's question and executes it.
67
 
68
  Returns:
69
- None
 
70
  """
71
 
72
  prompt = PromptTemplate(template=self.template, input_variables=["question"])
@@ -85,6 +90,6 @@ class ReasoningRouter:
85
 
86
  return response, strat_resp
87
 
88
- def default_reasoning_router_config() -> ReasoningConfig:
89
  usage="This router should be used when determing the most effective strategy for a query requiring more complex, but general reasoning to derive"
90
- return ReasoningConfig(temperature=0.6, max_tokens=3000, usage=usage)
 
1
  from langchain import PromptTemplate, LLMChain
2
+ from .react import ReactStrategy, get_react_config
3
+ from .tree_of_thought import TreeOfThoughtStrategy, get_tot_config
4
+ from .chain_of_thought import ChainOfThoughtStrategy, get_cot_confg
5
  from .reasoning_strategy import ReasoningConfig
6
  from typing import Tuple, Callable, Optional
7
+ import pprint
8
  import re
9
  import os
10
 
 
19
  Returns:
20
  None
21
  """
22
+ print("Creating Reasoning Router with config: ",)
23
+ pprint.pprint(vars(config))
24
  self.api_key = api_key
25
  self.llm = config.llm_class(temperature=config.temperature, max_tokens=config.max_tokens)
26
  self.question: str = question
27
  self.display: Callable = display
28
 
29
+
30
  self.strategies = {
31
+ 1: ReactStrategy(get_react_config(temperature=config.temperature), display=self.display),
32
+ 2: TreeOfThoughtStrategy(get_tot_config(temperature=config.temperature),display=self.display),
33
+ 3: ChainOfThoughtStrategy(get_cot_confg(temperature=config.temperature),display=self.display)
34
  }
35
  self.usage_block = f"""
36
 
 
70
  Determines the appropriate reasoning strategy based on the user's question and executes it.
71
 
72
  Returns:
73
+ response : Reason the strategy was selected
74
+ strat_response : Response from the strategy
75
  """
76
 
77
  prompt = PromptTemplate(template=self.template, input_variables=["question"])
 
90
 
91
  return response, strat_resp
92
 
93
+ def get_reasoning_router_config(temperature: float = 0.6) -> ReasoningConfig:
94
  usage="This router should be used when determing the most effective strategy for a query requiring more complex, but general reasoning to derive"
95
+ return ReasoningConfig(temperature=temperature, max_tokens=3000, usage=usage)
src/reasoning/reasoning_strategy.py CHANGED
@@ -31,6 +31,6 @@ class ReasoningStrategy:
31
  def run(self, question):
32
  raise NotImplementedError()
33
 
34
- def default_reasoning_config():
35
  usage = "This is the default reasoning model that should only be used as a last resort"
36
  return ReasoningConfig(usage=usage)
 
31
  def run(self, question):
32
  raise NotImplementedError()
33
 
34
+ def get_reasoning_config(temperature: float = 0.7) -> ReasoningConfig:
35
  usage = "This is the default reasoning model that should only be used as a last resort"
36
  return ReasoningConfig(usage=usage)
src/reasoning/tree_of_thought.py CHANGED
@@ -2,10 +2,13 @@ from .reasoning_strategy import ReasoningStrategy
2
  from langchain import LLMChain, PromptTemplate
3
  from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
4
  from typing import Callable
 
5
 
6
  class TreeOfThoughtStrategy(ReasoningStrategy):
7
  def __init__(self, config: ReasoningConfig, display: Callable):
8
  super().__init__(config=config, display=display)
 
 
9
 
10
  def run(self, question)-> str:
11
  print('Using ToT')
@@ -34,11 +37,11 @@ class TreeOfThoughtStrategy(ReasoningStrategy):
34
  self.display(response_tot)
35
  return response_tot
36
 
37
- def default_tot_config():
38
  usage= """
39
  This problem is complex and the solution requires exploring multiple reasoning paths over thoughts.
40
  It treats the problem as a search over a tree structure, with each node representing a partial
41
  solution and the branches corresponding to operators that modify the solution. It involves thought
42
  decomposition, thought generation, state evaluation, and a search algorithm
43
  """
44
- return ReasoningConfig(usage=usage)
 
2
  from langchain import LLMChain, PromptTemplate
3
  from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
4
  from typing import Callable
5
+ import pprint
6
 
7
  class TreeOfThoughtStrategy(ReasoningStrategy):
8
  def __init__(self, config: ReasoningConfig, display: Callable):
9
  super().__init__(config=config, display=display)
10
+ print("Creating Reasoning Router with config: ",)
11
+ pprint.pprint(vars(config))
12
 
13
  def run(self, question)-> str:
14
  print('Using ToT')
 
37
  self.display(response_tot)
38
  return response_tot
39
 
40
+ def get_tot_config(temperature: float = 0.7) -> ReasoningConfig:
41
  usage= """
42
  This problem is complex and the solution requires exploring multiple reasoning paths over thoughts.
43
  It treats the problem as a search over a tree structure, with each node representing a partial
44
  solution and the branches corresponding to operators that modify the solution. It involves thought
45
  decomposition, thought generation, state evaluation, and a search algorithm
46
  """
47
+ return ReasoningConfig(usage=usage, temperature=temperature)
src/streamlit_main.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv, find_dotenv
2
+ import os
3
+ import streamlit as st
4
+ from reasoning import ReasoningRouter, get_reasoning_router_config
5
+ load_dotenv(find_dotenv())
6
+
7
+
8
+ def run_app():
9
+ """
10
+ Runs the Streamlit application.
11
+
12
+ Returns:
13
+ None
14
+ """
15
+ openai_api_key = os.getenv("OPENAI_API_KEY")
16
+
17
+ col1, col2 = st.columns([1, 3])
18
+ with col1:
19
+ st.text("AI Agents Sandbox")
20
+ with col2:
21
+ st.title("Prompt Strategy Demo")
22
+ question = st.text_area('Enter your question here:', height=200)
23
+ config = get_reasoning_router_config()
24
+ if question:
25
+ determiner = ReasoningRouter(api_key=openai_api_key, config=config, question=question,display=st.write)
26
+ determiner.determine_and_execute()
27
+
28
+ if __name__ == "__main__":
29
+ run_app()
30
+