# Kid-safe chatbot Space: a primary LLM whose replies are screened by a
# Llama Guard model before being shown to the user.
import os

from dotenv import load_dotenv
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint

# Load environment variables from a local .env file (if present) so the
# Hugging Face API token does not have to be exported manually.
load_dotenv()

# Normalize to "" when the variable is unset so downstream .strip() calls
# cannot raise AttributeError on None; strip stray whitespace/newlines that
# commonly sneak into copy-pasted tokens.
HF_TOKEN = (os.getenv("HF_TOKEN") or "").strip()
# Initialize the Hugging Face endpoint for inference (Primary model) | |
llm = HuggingFaceEndpoint( | |
repo_id="mistralai/Mistral-7B-Instruct-v0.3", # Replace with your model repo | |
huggingfacehub_api_token=HF_TOKEN.strip(), | |
temperature=0.7, | |
max_new_tokens=100 | |
) | |
# Initialize the Meta-Llama Guard model for content safety | |
guard_llm = HuggingFaceEndpoint( | |
repo_id="meta-llama/Meta-Llama-Guard-2-8B", # Replace with your guard model repo | |
huggingfacehub_api_token=HF_TOKEN.strip(), | |
temperature=0.7, | |
max_new_tokens=100 | |
) | |
def guarded_chatbot_response(message):
    """Generate a reply to *message* and pass it through a safety filter.

    The primary LLM produces a raw answer, which is then forwarded to the
    guard model with instructions to return it unchanged if child-safe, or
    rephrase / refuse otherwise.

    Args:
        message: The user's chat message (plain text).

    Returns:
        The guarded response text, or an ``"Error: ..."`` string on any
        failure so the Gradio UI always has something to display.
    """
    try:
        # .invoke() is the current LangChain runnable API; calling the LLM
        # object directly (llm(message)) is the deprecated __call__ form.
        raw_response = llm.invoke(message)

        # NOTE(review): Llama Guard models are trained as safe/unsafe
        # classifiers; asking one to rephrase content may not behave as
        # intended — verify its actual outputs.
        guard_prompt = f"""
        You are a content safety guard for kids under 16. Analyze the following text and ensure it is friendly and appropriate for children.
        If appropriate, return the text as is. If not, rephrase or respond with "Content not suitable."
        Text: {raw_response}
        """
        return guard_llm.invoke(guard_prompt)
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error: {e}"
# --- Gradio UI: chatbot with guardrails ------------------------------------
with gr.Blocks() as app_with_guardrails:
    gr.Markdown("## Chatbot With Guardrails")
    gr.Markdown("This chatbot ensures all responses are appropriate for kids under 16.")

    # Input and output side by side.
    with gr.Row():
        user_input = gr.Textbox(label="Your Message", placeholder="Type here...")
        response_output = gr.Textbox(label="Guarded Response", placeholder="Bot will respond here...")
    submit_button = gr.Button("Send")

    # Route the message through the guarded pipeline on click.
    submit_button.click(
        guarded_chatbot_response,
        inputs=[user_input],
        outputs=[response_output],
    )
# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    app_with_guardrails.launch()