Update app.py
app.py CHANGED
@@ -133,11 +133,12 @@ with gr.Blocks() as demo:
 # Launch the Gradio application
 demo.launch()
 '''
+
 import gradio as gr
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 from langchain_huggingface import HuggingFaceEndpoint
-from langgraph.graph import StateGraph
+from langgraph.graph import StateGraph
 
 # Define the LLM models
 llm1 = HuggingFaceEndpoint(model='t5-small')
@@ -151,34 +152,42 @@ def agent2(response):
     return f"Agent 2: {response}"
 
 # Define the prompts and LLM chains
-chain1 = LLMChain(llm=llm1, prompt=PromptTemplate(
-
-
-
-
-
-
-
-
-
+chain1 = LLMChain(llm=llm1, prompt=PromptTemplate(
+    input_variables=["query"],
+    template="You are in state s1. {{query}}"
+))
+chain2 = LLMChain(llm=llm2, prompt=PromptTemplate(
+    input_variables=["query"],
+    template="You are in state s2. {{query}}"
+))
+
+# Create a state graph for managing the chatbot's states
+graph = StateGraph()
+
+# Create states and add them to the graph
+state1 = graph.add_state("s1")  # State for the first agent
+state2 = graph.add_state("s2")  # State for the second agent
+
+# Define transitions
+graph.add_edge(state1, state2, "next")  # Transition from s1 to s2
+graph.add_edge(state2, state1, "back")  # Transition from s2 to s1
 
 # Initialize the current state
-current_state =
+current_state = state1
 
 def handle_input(query):
     global current_state
     output = ''
 
-
-
-        output = chain1.invoke(input=query)  # Invoke chain1 with the user input
+    # Process user input based on current state
+    if current_state == state1:
+        output = chain1.invoke(input=query)  # Invoke chain1 with user input
         response = agent1(output)  # Process output through Agent 1
-        current_state =
-    elif current_state ==
-        #
-        output = chain2.invoke(input=query)  # Invoke chain2 with the user input
+        current_state = state2  # Transition to state s2
+    elif current_state == state2:
+        output = chain2.invoke(input=query)  # Invoke chain2 with user input
         response = agent2(output)  # Process output through Agent 2
-        current_state =
+        current_state = state1  # Transition back to state s1
 
     return response
 
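A note on the templates introduced above: with PromptTemplate's default f-string format, doubled braces are an escape, so "{{query}}" renders as the literal text "{query}" and the declared "query" input variable is never substituted. If substitution is the intent, single braces are the usual form; a minimal sketch:

    from langchain.prompts import PromptTemplate

    # Single braces mark a template variable; doubled braces escape to a literal brace.
    prompt = PromptTemplate(
        input_variables=["query"],
        template="You are in state s1. {query}",
    )
    print(prompt.format(query="Hello"))  # -> You are in state s1. Hello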
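The graph calls above (StateGraph() with no argument, add_state, and a three-argument add_edge) do not match the langgraph interface as documented: StateGraph is constructed with a state schema, nodes are registered with add_node, and transitions with add_edge(start, end). A rough sketch of the same two-agent hand-off against that documented API; the ChatState schema and node bodies are illustrative assumptions, not code from this commit:

    from typing import TypedDict
    from langgraph.graph import StateGraph, START, END

    class ChatState(TypedDict):  # assumed schema, not in the commit
        query: str
        response: str

    def s1(state: ChatState) -> dict:
        # Stand-in for chain1 + agent1; a real node would invoke the chain here
        return {"response": f"Agent 1: {state['query']}"}

    def s2(state: ChatState) -> dict:
        # Stand-in for chain2 + agent2
        return {"response": f"Agent 2: {state['query']}"}

    builder = StateGraph(ChatState)
    builder.add_node("s1", s1)    # state for the first agent
    builder.add_node("s2", s2)    # state for the second agent
    builder.add_edge(START, "s1")
    builder.add_edge("s1", "s2")  # transition from s1 to s2
    builder.add_edge("s2", END)
    graph = builder.compile()

    print(graph.invoke({"query": "hello", "response": ""})["response"])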
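Also worth noting: LLMChain.invoke returns a dict (the inputs plus an output key, "text" by default), not a bare string, so agent1(output) and agent2(output) format a whole dict into their messages. If only the completion is wanted, something like the following would extract it, assuming the default output key:

    result = chain1.invoke({"query": query})  # e.g. {"query": "...", "text": "..."}
    response = agent1(result["text"])         # pass just the model's completion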
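The hunk header shows the file builds a "with gr.Blocks() as demo:" interface, but the diff itself never shows how handle_input is wired into it. One plausible wiring, purely as a sketch (the component names are assumptions):

    import gradio as gr

    with gr.Blocks() as demo:
        inp = gr.Textbox(label="Your message")
        out = gr.Textbox(label="Response")
        inp.submit(handle_input, inputs=inp, outputs=out)  # route input through the state toggle

    demo.launch()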