Update app.py
Browse files
app.py
CHANGED
@@ -139,6 +139,15 @@ from langchain.chains import LLMChain
|
|
139 |
from langchain.prompts import PromptTemplate
|
140 |
from langchain_huggingface import HuggingFaceEndpoint
|
141 |
from langgraph.graph import StateGraph
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
142 |
|
143 |
# Define the LLM models
|
144 |
llm1 = HuggingFaceEndpoint(model='t5-small')
|
@@ -161,45 +170,35 @@ chain2 = LLMChain(llm=llm2, prompt=PromptTemplate(
|
|
161 |
template="You are in state s2. {{query}}"
|
162 |
))
|
163 |
|
164 |
-
# Define the state schema
|
165 |
-
state_schema = {
|
166 |
-
"s1": {
|
167 |
-
"inputs": ["query"],
|
168 |
-
"outputs": ["response"]
|
169 |
-
},
|
170 |
-
"s2": {
|
171 |
-
"inputs": ["query"],
|
172 |
-
"outputs": ["response"]
|
173 |
-
}
|
174 |
-
}
|
175 |
|
176 |
# Create a state graph with required schemas for inputs and outputs
|
177 |
-
graph = StateGraph(
|
178 |
|
179 |
# Add states to the graph
|
180 |
-
|
181 |
-
|
182 |
|
183 |
# Define transitions
|
184 |
-
graph.add_edge(
|
185 |
-
graph.add_edge(
|
|
|
186 |
|
187 |
# Initialize the current state
|
188 |
-
current_state =
|
189 |
|
190 |
def handle_input(query):
|
191 |
global current_state
|
192 |
output = ''
|
193 |
|
194 |
# Process user input based on current state
|
195 |
-
if current_state
|
196 |
output = chain1.invoke(input=query) # Invoke chain1 with user input
|
197 |
response = agent1(output) # Process output through Agent 1
|
198 |
-
current_state =
|
199 |
-
elif current_state
|
200 |
output = chain2.invoke(input=query) # Invoke chain2 with user input
|
201 |
response = agent2(output) # Process output through Agent 2
|
202 |
-
current_state =
|
203 |
|
204 |
return response
|
205 |
|
|
|
from typing import TypedDict

from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import END, START, StateGraph
143 |
+
|
144 |
+
class InputState(TypedDict):
    """Shared state schema for the graph: one text field and one numeric field."""

    # Free-form text carried through the graph nodes.
    string_var: str
    # Integer value carried through the graph nodes.
    numeric_var: int
|
147 |
+
|
148 |
+
def changeState(input: InputState):
    """Log the current graph state and return it unchanged.

    Serves as the node handler for both the "s1" and "s2" graph nodes;
    it performs no mutation, only a debug print.
    """
    print(f"Current value: {input}")
    return input
|
151 |
|
152 |
# Define the LLM models
|
153 |
llm1 = HuggingFaceEndpoint(model='t5-small')
|
|
|
170 |
template="You are in state s2. {{query}}"
|
171 |
))
|
172 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
173 |
|
174 |
# Create a state graph with required schemas for inputs and outputs
graph = StateGraph(InputState)

# Add states to the graph; both nodes share the same pass-through handler.
graph.add_node("s1", changeState)
graph.add_node("s2", changeState)

# Define transitions (the original comments mislabeled these edges).
graph.add_edge(START, "s1")  # Entry point: START -> s1
graph.add_edge("s1", "s2")   # Transition from s1 to s2
graph.add_edge("s2", END)    # Terminal transition: s2 -> END
|
185 |
|
186 |
# Initialize the current state
# Module-level finite-state marker read and toggled by handle_input();
# alternates between "s1" and "s2".
current_state = "s1"
|
188 |
|
189 |
def handle_input(query):
    """Route *query* through the chain/agent pair for the current state.

    In state "s1" the query goes through ``chain1`` then ``agent1`` and the
    state flips to "s2"; in state "s2" it goes through ``chain2`` then
    ``agent2`` and the state flips back to "s1".

    Args:
        query: User input forwarded to the active LLM chain.

    Returns:
        The agent-processed response for the state that was active on entry.

    Raises:
        RuntimeError: If ``current_state`` is neither "s1" nor "s2" (the
        original fell through and hit an UnboundLocalError on ``response``).
    """
    global current_state

    # Process user input based on current state
    if current_state == "s1":
        output = chain1.invoke(input=query)  # Invoke chain1 with user input
        response = agent1(output)            # Process output through Agent 1
        current_state = "s2"                 # Transition to state s2
    elif current_state == "s2":
        output = chain2.invoke(input=query)  # Invoke chain2 with user input
        response = agent2(output)            # Process output through Agent 2
        current_state = "s1"                 # Transition back to state s1
    else:
        # Guard: fail loudly instead of returning an unbound `response`.
        raise RuntimeError(f"Unknown state: {current_state!r}")

    return response
|
204 |
|