import gradio as gr
from guardrail import is_safe # Import the guardrail validation function
from langchain_huggingface import HuggingFaceEndpoint
from dotenv import load_dotenv
import os
# Load environment variables
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
# Validate Hugging Face token
if not HF_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please check your .env file.")
# Initialize the Hugging Face endpoint for generating responses
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    temperature=0.7,
    max_new_tokens=100
)
# Chatbot response function with safety checks
def chatbot_response(user_message: str) -> str:
    """
    Generates a chatbot response while ensuring the content is safe for children under 16.
    """
    try:
        # Step 1: Validate the user input
        if not is_safe(user_message):
            return "Sorry, I cannot respond to that as it violates our safety policy."

        # Step 2: Generate a response using the Mistral model
        raw_response = llm.invoke(user_message)

        # Step 3: Validate the generated response
        if not is_safe(raw_response):
            return "Sorry, I cannot share that information as it violates our safety policy."

        # Step 4: Return the validated response
        return raw_response
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Gradio Interface for the chatbot
with gr.Blocks() as app:
    gr.Markdown("## Kid-Safe Chatbot 🛡️")
    gr.Markdown("This chatbot ensures that all responses are appropriate for children under 16.")

    with gr.Row():
        user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
        response_output = gr.Textbox(label="Chatbot Response", placeholder="The chatbot will respond here.")

    submit_button = gr.Button("Send")

    # On button click, generate a response
    submit_button.click(
        fn=chatbot_response,
        inputs=[user_input],
        outputs=[response_output]
    )
# Launch the app
if __name__ == "__main__":
    app.launch()
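
# ---------------------------------------------------------------------------
# For reference: a minimal sketch of what the imported guardrail.is_safe helper
# could look like. This is an assumption for illustration only -- the actual
# guardrail module is not part of this file and may use a dedicated moderation
# model rather than a simple keyword blocklist. Kept commented out so it does
# not shadow the real import above.
#
# def is_safe(text: str) -> bool:
#     """Return True if the text contains none of the blocked terms (illustrative)."""
#     blocked_terms = {"violence", "weapon", "gambling"}  # placeholder terms, not the real policy
#     lowered = text.lower()
#     return not any(term in lowered for term in blocked_terms)
# ---------------------------------------------------------------------------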