|
import os
from typing import Dict, Literal, TypedDict

import gradio as gr
from dotenv import load_dotenv
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import StateGraph, START
from langgraph.types import Command
|
|
|
|
|
load_dotenv() |
|
HF_TOKEN = os.getenv("HF_TOKEN") |
|
|
|
|
|
llm = HuggingFaceEndpoint( |
|
repo_id="mistralai/Mistral-7B-Instruct-v0.3", |
|
huggingfacehub_api_token=HF_TOKEN.strip(), |
|
temperature=0.7, |
|
max_new_tokens=150, |
|
) |
|
|
|
|
|
class State(dict): |
|
issue_description: str |
|
priority: str |
|
escalation_needed: bool |
|
response: str |
|
escalation: str |
|
|
|
|
|
builder = StateGraph(State) |
|
|
|
|
|
|
|
|
|
def ticket_creation_agent(state: State) -> Command[Literal["priority_classification_agent"]]: |
|
"""Capture ticket description and proceed to priority classification.""" |
|
return Command(update={"issue_description": state["issue_description"]}, goto="priority_classification_agent") |
|
|
|
|
|
def priority_classification_agent(state: State) -> Command[Literal["escalation_classification_agent"]]: |
|
"""Classify priority based on the issue description.""" |
|
|
|
|
|
|
|
|
|
prompt = ( |
|
f"You are a support assistant. Based on the severity, classify the issues " |
|
f"into one of the following categories:\n" |
|
f"1. Urgent: Issues causing significant business impact (e.g., payment gateway failure, server down or issues that can not wait as per company's well being).\n" |
|
f"2. Critical: Issues needing immediate attention but with manageable impact.\n" |
|
f"3. Normal: Issues that can wait for routine handling.\n\n" |
|
f"Issue: {state['issue_description']} \n" |
|
f"Clearly specify only one category: 'urgent', 'critical', or 'normal'." |
|
) |
|
|
|
priority = llm.invoke(prompt).strip().lower() |
|
print("priority : ", priority) |
|
|
|
|
|
if priority not in ["urgent", "critical", "normal"]: |
|
priority = "normal" |
|
|
|
return Command(update={"priority": priority}, goto="escalation_classification_agent") |
|
|
|
|
|
def generate_response_agent(state: State) -> Dict[str, str]: |
|
"""Generate response based on ticket priority and escalation need.""" |
|
escalation = ( |
|
"Escalate the issue to a senior team member immediately." |
|
if state["escalation_needed"] |
|
else "No escalation needed." |
|
) |
|
|
|
|
|
prompt = ( |
|
f"You are a customer service assistant. Generate a complete, concise and actionable response " |
|
f"for the following issue:\n\n" |
|
f"Issue: {state['issue_description']}\n" |
|
f"Priority: {state['priority']}.\n\n" |
|
f"Your response should directly address the issue and provide next steps." |
|
) |
|
response = llm.invoke(prompt).strip() |
|
|
|
return {"response": response, "escalation": escalation} |
|
|
|
|
|
|
|
def escalation_classification_agent(state: State) -> Command[Literal["generate_response_agent"]]: |
|
"""Classify whether escalation is needed based on priority.""" |
|
escalation_needed = state["priority"].lower() in ["urgent", "critical"] |
|
print(f"Escalation Needed: {escalation_needed}, Priority: {state['priority']}") |
|
return Command(update={"escalation_needed": escalation_needed}, goto="generate_response_agent") |
|
|
|
|
|
|
|
def process_ticket(issue_description: str): |
|
"""Process the issue ticket through the multi-agent flow.""" |
|
state = {"issue_description": issue_description} |
|
try: |
|
print(f"Initial Issue Description: {issue_description}") |
|
result = graph.invoke(state) |
|
print(f"Graph Result: {result}") |
|
response = result.get("response", "No response generated") |
|
escalation = result.get("escalation", "No escalation specified") |
|
return response, escalation |
|
except Exception as e: |
|
print(f"Error occurred: {e}") |
|
return f"Error occurred: {e}", "Unable to determine escalation" |
|
|
|
|
|
|
|
builder.add_edge(START, "ticket_creation_agent") |
|
builder.add_node("ticket_creation_agent", ticket_creation_agent) |
|
builder.add_node("priority_classification_agent", priority_classification_agent) |
|
builder.add_node("escalation_classification_agent", escalation_classification_agent) |
|
builder.add_node("generate_response_agent", generate_response_agent) |
|
|
|
|
|
graph = builder.compile() |
|
|
|
|
|
def process_ticket(issue_description: str): |
|
"""Process the issue ticket through the multi-agent flow.""" |
|
state = {"issue_description": issue_description} |
|
try: |
|
result = graph.invoke(state) |
|
response = result.get("response", "No response generated") |
|
escalation = result.get("escalation", "No escalation specified") |
|
mermaid_code = graph.get_graph().draw_mermaid() |
|
return ( |
|
response, |
|
escalation, |
|
"## Mermaid Graph", |
|
"Check out this [mermaid link](https://mermaid.live/) to display a graph with the following data", |
|
f"```mermaid\n{mermaid_code}\n```" |
|
) |
|
except Exception as e: |
|
return f"Error occurred: {e}", "Unable to determine escalation" |
|
|
|
|
|
iface = gr.Interface( |
|
fn=process_ticket, |
|
inputs=gr.Textbox(label="Describe the issue"), |
|
outputs=[ |
|
gr.Textbox(label="Response"), |
|
gr.Textbox(label="Escalation Decision"), |
|
gr.Markdown(), |
|
gr.Markdown(), |
|
gr.Markdown(label="Mermaid Graph Visualization") |
|
], |
|
title="Ticket Handling System", |
|
) |
|
|
|
if __name__ == "__main__": |
|
iface.launch() |
|
|