# Hugging Face Space file header (upload metadata, not code):
# uploaded by pratikshahp — "Update app.py", commit 93fe64b (verified).
import os
from typing import Dict, Literal
from langgraph.graph import StateGraph, START
from langgraph.types import Command
from dotenv import load_dotenv
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
# Load environment variables from a local .env file, if present.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    # Fail fast with a clear message instead of the opaque AttributeError
    # that HF_TOKEN.strip() would raise when the variable is unset.
    raise RuntimeError("HF_TOKEN environment variable is not set.")
# Remote Mistral-7B-Instruct endpoint shared by all agents below.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    temperature=0.7,
    max_new_tokens=150,
)
# Define state class for managing the ticket state
class State(dict):
    """Shared graph state passed between the agents below.

    Subclasses ``dict`` so LangGraph can merge per-node ``Command`` updates;
    the class-level annotations document the keys the agents read and write.
    """
    # Raw user-supplied problem description (set by ticket_creation_agent).
    issue_description: str
    # One of 'urgent', 'critical', 'normal' (set by priority_classification_agent).
    priority: str
    # True when priority is urgent/critical (set by escalation_classification_agent).
    escalation_needed: bool
    # LLM-generated customer-facing reply (set by generate_response_agent).
    response: str
    # Human-readable escalation decision (set by generate_response_agent).
    escalation: str
# Create the graph
# (nodes and edges are registered near the bottom of the file, then compiled)
builder = StateGraph(State)
# Define the nodes for the multi-agent flow
# Ticket creation agent
def ticket_creation_agent(state: State) -> Command[Literal["priority_classification_agent"]]:
    """Record the incoming issue description, then hand off to priority classification."""
    issue = state["issue_description"]
    return Command(
        update={"issue_description": issue},
        goto="priority_classification_agent",
    )
# Priority classification agent
def priority_classification_agent(state: State) -> Command[Literal["escalation_classification_agent"]]:
    """Classify the ticket as 'urgent', 'critical', or 'normal' via the LLM.

    The model frequently wraps its answer in extra prose (e.g. "Category:
    urgent."), so the reply is scanned for the first category keyword rather
    than requiring an exact one-word match; 'normal' is the fallback when no
    keyword is found.
    """
    prompt = (
        f"You are a support assistant. Based on the severity, classify the issues "
        f"into one of the following categories:\n"
        f"1. Urgent: Issues causing significant business impact (e.g., payment gateway failure, server down or issues that can not wait as per company's well being).\n"
        f"2. Critical: Issues needing immediate attention but with manageable impact.\n"
        f"3. Normal: Issues that can wait for routine handling.\n\n"
        f"Issue: {state['issue_description']} \n"
        f"Clearly specify only one category: 'urgent', 'critical', or 'normal'."
    )
    answer = llm.invoke(prompt).strip().lower()
    print("priority : ", answer)  # debug log
    # Keyword scan instead of exact equality: the original strict check
    # silently defaulted to 'normal' whenever the model added any prose.
    priority = next(
        (label for label in ("urgent", "critical", "normal") if label in answer),
        "normal",  # default when classification fails entirely
    )
    return Command(update={"priority": priority}, goto="escalation_classification_agent")
# Generate response agent
def generate_response_agent(state: State) -> Dict[str, str]:
    """Produce the customer-facing reply and the escalation note for the ticket."""
    # Escalation message mirrors the flag set by escalation_classification_agent.
    if state["escalation_needed"]:
        escalation = "Escalate the issue to a senior team member immediately."
    else:
        escalation = "No escalation needed."
    # Explicit instructions keep the model's reply focused and actionable.
    prompt = (
        f"You are a customer service assistant. Generate a complete, concise and actionable response "
        f"for the following issue:\n\n"
        f"Issue: {state['issue_description']}\n"
        f"Priority: {state['priority']}.\n\n"
        f"Your response should directly address the issue and provide next steps."
    )
    reply = llm.invoke(prompt).strip()
    return {"response": reply, "escalation": escalation}
# Escalation classification agent
def escalation_classification_agent(state: State) -> Command[Literal["generate_response_agent"]]:
    """Flag the ticket for escalation when its priority is urgent or critical."""
    high_priority = {"urgent", "critical"}
    escalation_needed = state["priority"].lower() in high_priority
    print(f"Escalation Needed: {escalation_needed}, Priority: {state['priority']}")  # Debugging escalation
    return Command(update={"escalation_needed": escalation_needed}, goto="generate_response_agent")
# Gradio Interface function to process the ticket
# NOTE(review): this definition is dead code — it is shadowed by the second
# `process_ticket` defined after the graph is compiled below, and its 2-value
# return does not match the 5 Gradio outputs. Consider deleting it.
def process_ticket(issue_description: str):
    """Process the issue ticket through the multi-agent flow."""
    # Seed the graph state; the agents fill in the remaining keys.
    state = {"issue_description": issue_description}
    try:
        print(f"Initial Issue Description: {issue_description}")  # Debug log
        result = graph.invoke(state)
        print(f"Graph Result: {result}")  # Debug log
        # Defensive defaults in case a node failed to populate the state.
        response = result.get("response", "No response generated")
        escalation = result.get("escalation", "No escalation specified")
        return response, escalation
    except Exception as e:
        print(f"Error occurred: {e}")  # Debug log
        return f"Error occurred: {e}", "Unable to determine escalation"
# Add nodes to the graph
# Entry edge first, then one node per agent; each agent routes onward via
# the `goto` of the Command it returns, ending at generate_response_agent.
builder.add_edge(START, "ticket_creation_agent")
builder.add_node("ticket_creation_agent", ticket_creation_agent)
builder.add_node("priority_classification_agent", priority_classification_agent)
builder.add_node("escalation_classification_agent", escalation_classification_agent)
builder.add_node("generate_response_agent", generate_response_agent)
# Compile the graph
graph = builder.compile()
# Gradio Interface function to process the ticket
def process_ticket(issue_description: str):
    """Run the ticket through the compiled agent graph.

    Returns five values, one per Gradio output component: the generated
    response, the escalation decision, a Markdown heading, a helper link
    sentence, and a fenced Mermaid diagram of the graph.
    """
    state = {"issue_description": issue_description}
    try:
        result = graph.invoke(state)
        response = result.get("response", "No response generated")
        escalation = result.get("escalation", "No escalation specified")
        mermaid_code = graph.get_graph().draw_mermaid()
        return (
            response,
            escalation,
            "## Mermaid Graph",
            "Check out this [mermaid link](https://mermaid.live/) to display a graph with the following data",
            f"```mermaid\n{mermaid_code}\n```"
        )
    except Exception as e:
        # Bug fix: always return five values so every one of the five Gradio
        # output components receives something even on failure (the original
        # returned only two, leaving three components in an error state).
        return (
            f"Error occurred: {e}",
            "Unable to determine escalation",
            "## Mermaid Graph",
            "Graph visualization unavailable due to an error.",
            "",
        )
# Gradio Interface
# The five outputs must match, in order, the five values returned by
# process_ticket (response, escalation, heading, link text, mermaid block).
iface = gr.Interface(
    fn=process_ticket,
    inputs=gr.Textbox(label="Describe the issue"),
    outputs=[
        gr.Textbox(label="Response"),
        gr.Textbox(label="Escalation Decision"),
        gr.Markdown(),  # Placeholder for the "Mermaid Graph" heading
        gr.Markdown(),  # Placeholder for the link text
        gr.Markdown(label="Mermaid Graph Visualization")  # Mermaid visualization
    ],
    title="Ticket Handling System",
)
if __name__ == "__main__":
    iface.launch()