Sreekan committed on
Commit c0fdd98 · verified · 1 Parent(s): c3be02c

Update app.py

Files changed (1): app.py +181 -1
app.py CHANGED
@@ -64,7 +64,7 @@ demo = gr.ChatInterface(
 if __name__ == "__main__":
     demo.launch()
 
-'''
+
 
 import gradio as gr
 from langchain.chains import LLMChain
@@ -150,4 +150,184 @@ with gr.Blocks() as demo:
 
 # Launch the Gradio application
 demo.launch()
+'''
+from typing import Annotated, Sequence, TypedDict
+import operator
+import functools
+
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain_experimental.tools import PythonREPLTool
+from langchain.agents import create_openai_tools_agent
+from langchain_huggingface import HuggingFacePipeline
+from langgraph.graph import StateGraph, END
+
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+
+# SETUP: HuggingFace model and pipeline
+#name = "meta-llama/Llama-3.2-1B"
+#name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+#name = "deepseek-ai/deepseek-llm-7b-chat"
+#name = "openai-community/gpt2"
+#name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
+#name = "microsoft/Phi-3.5-mini-instruct"
+name = "Qwen/Qwen2.5-7B-Instruct-1M"
+
+tokenizer = AutoTokenizer.from_pretrained(name, truncation=True)
+tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the padding token
+model = AutoModelForCausalLM.from_pretrained(name)
+
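Note: AutoModelForCausalLM.from_pretrained loads float32 weights by default, so a 7B checkpoint needs roughly 28 GB of memory. Where the hardware allows, loading at reduced precision is the usual mitigation; a minimal sketch, not part of this commit:

    import torch
    # Hypothetical lower-memory variant of the load above; assumes a GPU and the accelerate package
    model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.bfloat16, device_map="auto")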
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    device_map="auto",
+    max_new_tokens=500,  # maximum tokens to generate per call
+)
+print("pipeline is created")
+
+# Wrap in LangChain's HuggingFacePipeline
+llm = HuggingFacePipeline(pipeline=pipe)
+
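Note: once wrapped, the pipeline behaves like any other LangChain runnable, which makes a quick smoke test easy before the graph is built. An illustrative check, with an arbitrary prompt string:

    # Sanity-check the wrapped pipeline (illustrative, not in app.py)
    print(llm.invoke("Reply with the single word OK."))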
+# Worker members and routing options
+members = ["Researcher", "Coder"]
+options = ["FINISH"] + members
+
+# Supervisor prompt
+system_prompt = (
+    "You are a supervisor tasked with managing a conversation between the following workers: {members}."
+    " Given the following user request, respond with the worker to act next. Each worker will perform a task"
+    " and respond with their results and status. When all workers are finished, respond with FINISH."
+)
+
+# Prompt template required for the workflow
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", system_prompt),
+        MessagesPlaceholder(variable_name="messages"),
+        ("system", "Given the conversation above, who should act next? Or should we FINISH? Select one of: {options}"),
+    ]
+).partial(options=str(options), members=", ".join(members))
+
+print("Prompt template created")
+
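Note: because options and members are pre-filled via .partial, the template only needs the running message list at call time. An illustrative preview of what the supervisor model actually sees:

    # Render the routing prompt for a one-message conversation (illustrative only)
    preview = prompt.format(messages=[HumanMessage(content="Find the latest Gradio release.")])
    print(preview)  # system instructions, the conversation, then the routing question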
+# Supervisor routing logic
+def route_tool_response(llm_response):
+    """
+    Parse the LLM response to decide the next step: a worker name or FINISH.
+    """
+    if "FINISH" in llm_response:
+        return "FINISH"
+    for member in members:
+        if member in llm_response:
+            return member
+    return "Unknown"
+
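Note: text-generation pipelines return the prompt plus the completion in generated_text by default (return_full_text=True), and the prompt itself names every worker, so substring matching on the full string can misroute. A stricter variant would inspect only the newly generated text; a sketch (route_completion_only is hypothetical, not part of this commit):

    def route_completion_only(full_text, prompt_text):
        # Look only at text generated after the echoed prompt
        completion = full_text[len(prompt_text):]
        for token in ["FINISH"] + members:
            if token in completion:
                return token
        return "Unknown"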
+def supervisor_chain(state):
+    """
+    Supervisor logic: query the HuggingFace pipeline and decide the next worker.
+    """
+    messages = state.get("messages", [])
+    print(f"[TRACE] Supervisor received messages: {messages}")  # trace input messages
+    user_prompt = prompt.format(messages=messages)
+
+    try:
+        llm_response = pipe(user_prompt, max_new_tokens=500)[0]["generated_text"]
+        print(f"[TRACE] LLM Response: {llm_response}")  # trace LLM interaction
+    except Exception as e:
+        raise RuntimeError(f"LLM processing error: {e}")
+
+    next_action = route_tool_response(llm_response)
+    print(f"[TRACE] Supervisor deciding next action: {next_action}")  # trace state changes
+    return {"next": next_action}
+
+# AgentState definition
+class AgentState(TypedDict):
+    messages: Annotated[Sequence[BaseMessage], operator.add]
+    next: str
+
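Note: the Annotated[..., operator.add] reducer is what lets each node return only its own update: LangGraph concatenates the annotated messages field across updates, while plain fields such as next are overwritten. Conceptually:

    # How the reducer merges a node's update into the running state (conceptual sketch)
    state_messages = [HumanMessage(content="hi")]
    update_messages = [HumanMessage(content="done")]
    merged = operator.add(state_messages, update_messages)  # lists concatenate
    # 'next' has no reducer, so a new value simply replaces the old one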
+# Create tools
+tavily_tool = TavilySearchResults(max_results=5)
+python_repl_tool = PythonREPLTool()
+
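Note: both tools carry prerequisites. TavilySearchResults reads a TAVILY_API_KEY from the environment, and PythonREPLTool executes model-generated code in-process, so it should only run in a sandboxed environment such as a container. A minimal guard, illustrative only:

    import os
    # Fail fast if the Tavily key is missing rather than erroring mid-workflow
    if not os.environ.get("TAVILY_API_KEY"):
        raise EnvironmentError("Set TAVILY_API_KEY before using TavilySearchResults")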
+# Create agents with their respective prompts
+research_agent = create_openai_tools_agent(
+    llm=llm,
+    tools=[tavily_tool],
+    prompt=ChatPromptTemplate.from_messages(
+        [
+            SystemMessage(content="You are a web researcher."),
+            MessagesPlaceholder(variable_name="messages"),
+            MessagesPlaceholder(variable_name="agent_scratchpad"),  # placeholder required by create_openai_tools_agent
+        ]
+    ),
+)
+
+print("Created research agent")
+
+code_agent = create_openai_tools_agent(
+    llm=llm,
+    tools=[python_repl_tool],
+    prompt=ChatPromptTemplate.from_messages(
+        [
+            SystemMessage(content="You may generate safe Python code for analysis."),
+            MessagesPlaceholder(variable_name="messages"),
+            MessagesPlaceholder(variable_name="agent_scratchpad"),  # placeholder required by create_openai_tools_agent
+        ]
+    ),
+)
+
+print("Created coder agent")
+
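Note: create_openai_tools_agent binds the tools in OpenAI function-calling format, which assumes the underlying model can accept and emit tool calls. A plain HuggingFacePipeline text-generation wrapper has no native tool calling, so the bound tools kwarg may be ignored or rejected at inference time; where a tool-calling chat model is available, it is the safer fit. A sketch of that substitution (all names hypothetical):

    # Hypothetical: swap in a chat model with native tool calling
    # from langchain_openai import ChatOpenAI
    # tool_llm = ChatOpenAI(model="gpt-4o-mini")
    # research_agent = create_openai_tools_agent(llm=tool_llm, tools=[tavily_tool], prompt=research_prompt)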
+# Create the workflow
+workflow = StateGraph(AgentState)
+
+# Nodes
+workflow.add_node("Researcher", research_agent)  # pass the agent runnable directly (no .run required)
+workflow.add_node("Coder", code_agent)           # pass the agent runnable directly
+workflow.add_node("supervisor", supervisor_chain)
+
+# Every worker reports back to the supervisor
+for member in members:
+    workflow.add_edge(member, "supervisor")
+
+# The supervisor routes to a worker or finishes
+workflow.add_conditional_edges(
+    "supervisor",
+    lambda x: x["next"],
+    {k: k for k in members} | {"FINISH": END},  # map each worker to its node, FINISH to END
+)
+print("[DEBUG] Workflow edges added: supervisor -> members/FINISH based on 'next'")
+
+# Define entry point
+workflow.set_entry_point("supervisor")
+
+print(workflow)
+
+# Compile the workflow
+graph = workflow.compile()
+
+# Render the graph diagram (works in a notebook environment)
+from IPython.display import display, Image
+display(Image(graph.get_graph().draw_mermaid_png()))
+
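Note: IPython.display only renders inside a notebook; in a plain script or Space, writing the diagram to a file is the equivalent move (illustrative; "workflow.png" is an arbitrary name):

    # Persist the Mermaid-rendered graph instead of displaying it inline
    with open("workflow.png", "wb") as f:
        f.write(graph.get_graph().draw_mermaid_png())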
+# Properly formatted initial state
+initial_state = {
+    "messages": [
+        # HumanMessage(content="Code hello world and print it to the terminal.")
+        HumanMessage(content="Write code for printing \"hello world\" in Python. Keep it precise.")  # correct format for user input
+    ]
+}
+
+# Execute the workflow
+try:
+    print(f"[TRACE] Initial workflow state: {initial_state}")
+    result = graph.invoke(initial_state)
+    print(f"[TRACE] Workflow Result: {result}")  # final workflow result
+except Exception as e:
+    print(f"[ERROR] Workflow execution failed: {e}")
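Note: for debugging multi-step runs, the compiled graph can also be consumed incrementally; stream yields each node's state update as it completes rather than one final result. An illustrative variant of the invocation above:

    # Observe the supervisor/worker loop step by step
    for step in graph.stream(initial_state):
        print(step)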