import os

from dotenv import load_dotenv

import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
from together import Together
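
# Load API credentials for Hugging Face and Together from a local .env file.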
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
API_KEY = os.getenv("API_KEY")
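
# Together client used to call the Llama Guard moderation model.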
client = Together(api_key=API_KEY)
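
# Mistral-7B-Instruct, served through the Hugging Face inference endpoint,
# generates the raw (unmoderated) chatbot replies.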
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    temperature=0.7,
    max_new_tokens=100,
)


def chatbot_response_with_guardrails(message):
    try:
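        # Step 1: generate the raw, unmoderated reply from the chat model.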
        raw_response = llm.invoke(message)
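
        # Step 2: screen the reply with Llama Guard 2 via the Together API.
        # Llama Guard is trained on a chat-style prompt template, so passing
        # the bare text here is a simplification that may reduce accuracy.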
        response = client.completions.create(
            model="meta-llama/LlamaGuard-2-8b",
            prompt=raw_response,
        )

        guardrail_check = response.choices[0].text.strip()
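
        # Llama Guard replies "safe" or "unsafe" (plus any violated category codes).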
        if "unsafe" in guardrail_check.lower():
            return "Content not suitable."
        else:
            return raw_response

    except Exception as e:
        return f"Error: {e}"
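

# Minimal Gradio UI: the user's message goes in, the guarded response comes out.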
with gr.Blocks() as app_with_guardrails:
    gr.Markdown("## Chatbot With Guardrails")
    gr.Markdown("This chatbot ensures all responses are appropriate.")

    with gr.Row():
        user_input = gr.Textbox(label="Your Message", placeholder="Type here...")
        response_output = gr.Textbox(label="Guarded Response", placeholder="Bot will respond here...")
    submit_button = gr.Button("Send")

    submit_button.click(
        chatbot_response_with_guardrails,
        inputs=[user_input],
        outputs=[response_output],
    )

if __name__ == "__main__":
    app_with_guardrails.launch()