Sreekan committed on
Commit
649caa0
·
verified ·
1 Parent(s): e48ef1a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -1
app.py CHANGED
@@ -63,7 +63,8 @@ demo = gr.ChatInterface(
63
 
64
  if __name__ == "__main__":
65
  demo.launch()
66
- '''
 
67
 
68
  import gradio as gr
69
  from langchain.chains import LLMChain
@@ -131,3 +132,69 @@ with gr.Blocks() as demo:
131
 
132
  # Launch the Gradio application
133
  demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
  if __name__ == "__main__":
65
  demo.launch()
66
+
67
+
68
 
69
  import gradio as gr
70
  from langchain.chains import LLMChain
 
132
 
133
  # Launch the Gradio application
134
  demo.launch()
135
+ '''
136
+ import gradio as gr
137
+ from langchain.chains import LLMChain
138
+ from langchain.prompts import PromptTemplate
139
+ from langchain_huggingface import HuggingFaceEndpoint
140
+ from langgraph.graph import StateGraph, END, START
141
+
142
# Define the LLM models.
# HuggingFaceEndpoint wraps a Hugging Face-hosted inference endpoint for
# each model id.  NOTE(review): t5-small / t5-large are text2text models —
# confirm the endpoint task type matches how the chains below use them.
llm1 = HuggingFaceEndpoint(model='t5-small')
llm2 = HuggingFaceEndpoint(model='t5-large')
145
+
146
# Agent post-processors: tag a chain's output with the agent's name.
def agent1(response):
    """Return *response* prefixed with the "Agent 1" tag."""
    return "Agent 1: {}".format(response)
149
+
150
def agent2(response):
    """Return *response* prefixed with the "Agent 2" tag."""
    tag = "Agent 2"
    return f"{tag}: {response}"
152
+
153
# Define the prompts and LLM chains — one chain per state.
# FIX: the original templates used "{{query}}".  PromptTemplate's default
# f-string template syntax treats doubled braces as an escaped literal
# "{query}", so the user's input was never interpolated into the prompt.
# Single braces make "query" a real template variable, matching
# input_variables=["query"].
chain1 = LLMChain(llm=llm1, prompt=PromptTemplate(input_variables=["query"], template="You are in state s1. {query}"))
chain2 = LLMChain(llm=llm2, prompt=PromptTemplate(input_variables=["query"], template="You are in state s2. {query}"))
156
+
157
# State definitions
# NOTE(review): in langgraph, StateGraph normally takes a state *schema*
# (e.g. a TypedDict), and add_edge takes (start_node, end_node) *name
# strings* — the calls below pass graph objects and a label instead.
# Confirm against the pinned langgraph version; this may fail at startup.
s1 = StateGraph("s1")
s2 = StateGraph("s2")

# Create transitions in the states
s1.add_edge(s2, 'next') # From state s1 to s2
s2.add_edge(s1, 'back') # From state s2 to s1

# Initialize the current state.
# handle_input() mutates this module-level variable, alternating between
# s1 and s2 on successive calls.
current_state = s1
167
+
168
def handle_input(query):
    """Run *query* through the chain for the current state, then flip state.

    Alternates between state s1 (chain1 -> agent1) and state s2
    (chain2 -> agent2) on successive calls, mutating the module-level
    ``current_state``.

    FIX: the original used ``elif current_state == s2`` with no fallback,
    so ``response`` could raise UnboundLocalError if neither branch
    matched; since only s1/s2 are ever assigned, an ``else`` branch is
    behavior-identical and removes the hazard.  The dead ``output = ''``
    initializer is also dropped.

    Returns the agent-tagged response.
    """
    global current_state

    if current_state == s1:
        # State s1: chain1 produces the raw output, agent1 tags it.
        # NOTE(review): recent LLMChain.invoke returns a dict, not a
        # string — confirm the expected shape with the pinned langchain.
        raw = chain1.invoke(input=query)
        response = agent1(raw)
        current_state = s2  # Transition to state s2
    else:
        # State s2 (the only other state): chain2 + agent2.
        raw = chain2.invoke(input=query)
        response = agent2(raw)
        current_state = s1  # Transition back to state s1

    return response
184
+
185
# Create the Gradio interface: a one-shot chat panel wired to handle_input.
with gr.Blocks() as demo:
    gr.Markdown("# Chatbot Interface")
    chatbot_interface = gr.Chatbot()
    user_input = gr.Textbox(label="Your Message", placeholder="Type something here...")
    submit_btn = gr.Button("Send")

    # Define the behavior of the submit button.
    # FIX: the original wrapped handle_input in an identity lambda
    # (``lambda input_text: handle_input(input_text)``); passing the
    # function itself is equivalent and clearer.
    # NOTE(review): gr.Chatbot expects a list of (user, bot) message
    # pairs, but handle_input returns a plain string — confirm how the
    # installed gradio version renders this output.
    submit_btn.click(
        fn=handle_input,
        inputs=[user_input],
        outputs=chatbot_interface,
    )

# Launch the Gradio application
demo.launch()