# Source: Hugging Face Space by pratikshahp — commit 6473a13 ("Update app.py"), verified.
import os
from dotenv import load_dotenv
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
# Load environment variables from a local .env file (if present).
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast with a clear message instead of the cryptic AttributeError that
# `None.strip()` would raise below when the token is missing.
if not HF_TOKEN:
    raise RuntimeError(
        "HF_TOKEN environment variable is not set. "
        "Add it to your .env file or Space secrets."
    )

# Hosted inference endpoint used for all completions.
# NOTE(review): temperature/max_new_tokens are fixed here — tune as needed.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",  # Replace with your model repo
    huggingfacehub_api_token=HF_TOKEN.strip(),  # strip stray whitespace/newlines from the secret
    temperature=0.7,
    max_new_tokens=100,
)
# Function to handle chatbot response
def chatbot_response(message):
    """Return the model's reply to *message*, or an error string on failure.

    Args:
        message: The user's input text, forwarded verbatim to the LLM.

    Returns:
        The model's generated text, or a ``"Error: ..."`` string if the
        endpoint call raised — keeping the Gradio UI responsive instead of
        surfacing a traceback.
    """
    try:
        # Prefer .invoke() over calling the LLM directly: __call__ is
        # deprecated in LangChain's Runnable interface.
        return llm.invoke(message)
    except Exception as e:  # boundary handler: report, don't crash the UI
        return f"Error: {e}"
# --- UI: chatbot demo, no content filtering applied to model output ---
with gr.Blocks() as app_without_guardrails:
    gr.Markdown("## Chatbot Without Guardrails")
    gr.Markdown("This chatbot uses the model directly without applying any content filtering.")

    # One row: user's message on the left, model reply on the right.
    with gr.Row():
        message_box = gr.Textbox(label="Your Message", placeholder="Type here...")
        reply_box = gr.Textbox(label="Response", placeholder="Bot will respond here...")

    send_button = gr.Button("Send")

    # Wire the button: message text in, model reply out.
    send_button.click(
        fn=chatbot_response,
        inputs=[message_box],
        outputs=[reply_box],
    )
def main():
    """Start the Gradio server for the no-guardrails demo app."""
    app_without_guardrails.launch()


# Launch only when run as a script, not when imported.
if __name__ == "__main__":
    main()