|
import os |
|
from dotenv import load_dotenv |
|
import gradio as gr |
|
from langchain_huggingface import HuggingFaceEndpoint |
|
|
|
|
|
# Load variables from a local .env file (if present) into the environment.
load_dotenv()

HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast with an actionable message when the token is absent.  The
# original `HF_TOKEN.strip()` would otherwise crash with an opaque
# "'NoneType' object has no attribute 'strip'" AttributeError.
if not HF_TOKEN:
    raise RuntimeError(
        "HF_TOKEN environment variable is not set. "
        "Add it to your environment or a .env file."
    )

# Remote Mistral-7B-Instruct endpoint on the Hugging Face Inference API.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),  # strip stray whitespace/newlines from copied tokens
    temperature=0.7,       # mild sampling randomness
    max_new_tokens=100     # cap response length
)
|
|
|
|
|
def chatbot_response(message, model=None):
    """Generate a reply for *message* using the configured LLM.

    Parameters
    ----------
    message : str
        The user's prompt, forwarded verbatim to the model.
    model : optional
        Object exposing ``.invoke(str) -> str``; defaults to the
        module-level ``llm`` endpoint.  Exposed so tests can inject
        a stub without a live API token.

    Returns
    -------
    str
        The model's reply, or an ``"Error: ..."`` string on failure
        (broad catch is intentional: this is the UI boundary and the
        message is rendered directly in the response textbox).
    """
    target = llm if model is None else model
    try:
        # .invoke() is the current LangChain runnable API; calling the
        # LLM object directly (llm(message)) is deprecated.
        return target.invoke(message)
    except Exception as e:
        return f"Error: {e}"
|
|
|
|
|
|
|
# Assemble the demo page: two markdown headers, an input/output row,
# and a send button wired straight to the model (no content filtering).
with gr.Blocks() as app_without_guardrails:
    gr.Markdown("## Chatbot Without Guardrails")
    gr.Markdown("This chatbot uses the model directly without applying any content filtering.")

    with gr.Row():
        message_box = gr.Textbox(label="Your Message", placeholder="Type here...")
        reply_box = gr.Textbox(label="Response", placeholder="Bot will respond here...")
        send_btn = gr.Button("Send")

    # Clicking "Send" feeds the message to chatbot_response and shows
    # the returned string in the response textbox.
    send_btn.click(fn=chatbot_response, inputs=[message_box], outputs=[reply_box])
|
|
|
|
|
if __name__ == "__main__":

    # Start the Gradio server only when executed as a script,
    # not when this module is imported.
    app_without_guardrails.launch()
|
|