import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import os
from bertopic import BERTopic
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from sentence_transformers import SentenceTransformer
# Retrieve the token from environment variables
# NOTE(review): the env var name looks misspelled ("ACCES" vs "ACCESS") —
# confirm it matches the deployment's configured secret before renaming.
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
# Use the token with from_pretrained
# Llama-2 loading is disabled (gated model); kept for reference.
#tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
#model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
# Load the tokenizer and model
# GPT-2-XL stands in for Llama-2; downloaded from the HF Hub on first run.
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-xl")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-xl")
# Assuming BERTopic and other necessary components are initialized here
# Initialize your BERTopic model
# Topic modelling is currently disabled; generate_response() uses a placeholder.
#sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
#topic_model = BERTopic(embedding_model=sentence_model)
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike event from the Chatbot component to stdout."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)
def add_text(history, text):
    """Append (user message, canned bot reply) to *history* in place.

    Returns the same list so it can feed a Gradio output slot directly.
    """
    entry = (text, "**That's cool!**")
    history.append(entry)
    return history
def add_file(history, file):
    """Record an uploaded file's name in the chat history (in place).

    The bot side of the tuple is left empty; returns the mutated history.
    """
    label = f"Uploaded file: {file.name}"
    history.append((label, ""))
    return history
def initialize_chat():
    """Seed the conversation with the assistant's greeting.

    Returns a one-entry Gradio chat history: a list of (user, bot) tuples,
    where ``None`` in the user slot marks a bot-only message.

    Fix: the original broke a single-quoted string literal across a physical
    line, which is a SyntaxError; implicit string concatenation restores the
    intended two-line greeting.
    """
    greeting = (
        "Hello, my name is Andrea, I'm a Friendly Chatbot and will help you "
        "with your learning journey.\n"
        "Select a question from below to start!"
    )
    return [(None, greeting)]
# Module-level conversation state; generate_response() declares it global and
# appends to it on every click, so it grows for the lifetime of the process.
chat_history = initialize_chat()
def generate_response(selected_question):
    """Generate a model reply for *selected_question* and return the full chat.

    Appends ``(None, reply)`` to the module-level ``chat_history`` (a bot-only
    turn in Gradio's (user, bot) tuple format) and returns the whole list so
    it can be bound directly to the Chatbot output.

    Note: sampling (``do_sample=True``) makes the reply non-deterministic.
    """
    global chat_history
    prompt = selected_question  # Ensure selected_question is a string
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    # Fix: the original passed max_length=100 to generate(), which caps
    # prompt + completion together — a prompt longer than ~100 tokens would
    # leave no room for a reply. max_new_tokens bounds only the continuation.
    outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, top_p=0.95, top_k=50)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Topic modelling (BERTopic) is disabled while topic_model is commented
    # out at module level; keep the user-visible placeholder text unchanged.
    topics_str = "Topic analysis not available"
    # Chatbot expects a list of (user, bot) tuples.
    new_response = (None, response + "\n\nTopics: " + topics_str)
    chat_history.append(new_response)
    return chat_history
# UI layout: intro markdown, chatbot pane, a read-only textbox that the
# example list fills, and a submit button wired to generate_response().
with gr.Blocks() as demo:
    gr.Markdown(
    """
    # Child safe chatbot project !
    In the realm of digital communication, the development of an advanced chatbot that incorporates topic modeling represents a significant leap towards enhancing user interaction and maintaining focus during conversations. This innovative chatbot design is specifically engineered to streamline discussions by guiding users to select from a curated list of suggested questions. This approach is crafted to mitigate the risk of diverging into off-topic dialogues, which are common pitfalls in conventional chatbot systems.
    """)
    chatbot = gr.Chatbot(
        initialize_chat(),  # seed the pane with the greeting message
        elem_id="chatbot",
        bubble_full_width=False,
        label= "Safe Chatbot v1",
        # (user avatar, bot avatar); assumes avatar.png exists in the CWD —
        # NOTE(review): confirm the file ships with the app.
        avatar_images=(None, os.path.join(os.getcwd(), "avatar.png"))
    )
    with gr.Row():
        # interactive=False: users cannot type free text, only pick examples.
        txt = gr.Textbox(scale=4, show_label=False, placeholder="Select Question", container=False, interactive=False) # Adjust based on need
        btn = gr.Button("Submit")
    btn.click(fn=generate_response, inputs=[txt], outputs=chatbot)
    # Curated questions — the only way to feed the model input.
    examples = [
        ["What are the basic requirements to become an airforce pilot?"],
        ["How long does it take to train as an airforce pilot?"],
        ["Can you describe a day in the life of an airforce pilot?"]
    ]
    gr.Examples(examples, inputs=[txt], outputs=[chatbot], label="Select Question")
    # Route like/dislike feedback on chat messages to stdout logging.
    chatbot.like(print_like_dislike, None, None)
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()