|
import os
from typing import Literal, TypedDict

import gradio as gr
from dotenv import load_dotenv
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import StateGraph, START
from langgraph.types import Command
|
|
|
|
|
# Load HF_TOKEN (and any other settings) from a local .env file, if present.
load_dotenv()

HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    # Fail fast with a clear message instead of the opaque AttributeError
    # that HF_TOKEN.strip() would raise when the variable is unset.
    raise RuntimeError(
        "HF_TOKEN environment variable is not set; add it to your .env file "
        "or export it before starting the app."
    )

# Hosted Mistral-7B-Instruct endpoint used to draft the response plan.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),  # strip stray whitespace/newlines from .env
    temperature=0.7,      # moderate creativity for plan wording
    max_new_tokens=150,   # short output; the prompt asks for ~100 words
)
|
|
|
|
|
class State(TypedDict):
    """Shared graph state for the disaster-response pipeline.

    LangGraph merges each node's returned/`Command` update into this
    mapping as execution proceeds. TypedDict is the documented schema
    form for StateGraph (a plain dict subclass with annotations is not).
    """

    disaster_type: str    # e.g. "Flood", "Fire" — chosen in the UI dropdown
    severity: str         # "Low" | "Medium" | "High" | "Critical"
    resources: list[str]  # resources ticked in the checkbox group
    recommendation: str   # LLM-generated plan; filled by generate_response_node
|
|
|
|
|
builder = StateGraph(State) |
|
|
|
|
|
def disaster_type_node(state: State) -> Command[Literal["severity_node"]]:
    """Record the chosen disaster type, then route to severity selection."""
    payload = {"disaster_type": state["disaster_type"]}
    return Command(update=payload, goto="severity_node")
|
|
|
def severity_node(state: State) -> Command[Literal["resources_node"]]:
    """Record the chosen severity level, then route to resource selection."""
    severity_update = {"severity": state["severity"]}
    return Command(update=severity_update, goto="resources_node")
|
|
|
def resources_node(state: State) -> Command[Literal["generate_response_node"]]:
    """Record the selected resources, then route to plan generation."""
    resource_update = {"resources": state["resources"]}
    return Command(update=resource_update, goto="generate_response_node")
|
|
|
def generate_response_node(state: State):
    """Generate a disaster response plan from the collected state.

    Reads ``disaster_type``, ``severity`` and ``resources`` from the state,
    builds a prompt and asks the LLM for a ~100-word plan.

    Returns:
        dict: a partial state update carrying the ``recommendation`` text.
    """
    resources = ", ".join(state["resources"])
    prompt = f"""
Disaster: {state['disaster_type']}
Severity: {state['severity']}
Resources: {resources}

You are an emergency response assistant. Provide a detailed response plan for the given situation in 100 words.
"""
    # llm(prompt) goes through the deprecated __call__ path (removed in newer
    # LangChain releases); .invoke() is the supported Runnable entry point and
    # behaves identically for a plain string prompt.
    response = llm.invoke(prompt)
    return {"recommendation": response}
|
|
|
|
|
# Register all nodes first, then wire the entry edge (conventional order;
# the original added the edge before any node existed, which only works
# because LangGraph defers validation to compile()).
builder.add_node("disaster_type_node", disaster_type_node)
builder.add_node("severity_node", severity_node)
builder.add_node("resources_node", resources_node)
builder.add_node("generate_response_node", generate_response_node)

# Single entry edge; every node routes onward itself via Command(goto=...),
# so no further static edges are required.
builder.add_edge(START, "disaster_type_node")

graph = builder.compile()
|
|
|
|
|
def process_disaster(disaster_type, severity, resources):
    """Run the disaster response graph with user inputs.

    Returns a 4-tuple for the Gradio outputs: the generated plan plus
    three markdown fragments that let the user render the graph on
    mermaid.live.
    """
    initial_state = {
        "disaster_type": disaster_type,
        "severity": severity,
        "resources": resources,
    }
    final_state = graph.invoke(initial_state)

    mermaid_source = graph.get_graph().draw_mermaid()

    plan = final_state["recommendation"]
    heading = "## Mermaid Graph"
    hint = "Check out this [mermaid link](https://mermaid.live/) to display a graph with the following data"
    diagram = f"```mermaid\n{mermaid_source}\n```"
    return plan, heading, hint, diagram
|
|
|
# Build the Gradio UI: three inputs feed process_disaster, which returns the
# plan text plus three markdown fragments describing the compiled graph.
disaster_input = gr.Dropdown(
    ["Flood", "Fire", "Earthquake", "Tornado", "Other"],
    label="Select Disaster Type",
)
severity_input = gr.Dropdown(
    ["Low", "Medium", "High", "Critical"],
    label="Select Severity Level",
)
resources_input = gr.CheckboxGroup(
    [
        "Water and Food Supplies",
        "Medical Aid",
        "Shelter and Evacuation",
        "Search and Rescue Teams",
        "Other",
    ],
    label="Select Available Resources (Multi-Select)",
)

iface = gr.Interface(
    fn=process_disaster,
    inputs=[disaster_input, severity_input, resources_input],
    outputs=[
        gr.Textbox(label="Generated Disaster Response Plan"),
        gr.Markdown(),
        gr.Markdown(),
        gr.Markdown(label="Mermaid Graph Visualization"),
    ],
    title="Disaster Response Assistant",
)

iface.launch()