import gradio as gr
from transformers import pipeline, BertTokenizer, AutoModelForSequenceClassification
import torch
import os
from groq import Groq
# Set the GROQ_API_KEY environment variable (this is your API key for Groq)
api_key = os.getenv("GROQ_API_KEY")
if not api_key:
    raise ValueError("GROQ_API_KEY is missing. Please add it as a secret in Hugging Face Spaces.")
# Initialize the Groq client
client = Groq(api_key=api_key)
# Load the saved models and tokenizers for bullying detection
bullying_model_path = 'kairaamilanii/IndoBERT-Bullying-Classifier'
bullying_tokenizer = BertTokenizer.from_pretrained(bullying_model_path)
bullying_model = AutoModelForSequenceClassification.from_pretrained(bullying_model_path)
# Load the emotion detection pipeline
emotion_model_path = "StevenLimcorn/indonesian-roberta-base-emotion-classifier"
emotion_pipeline = pipeline(
    "sentiment-analysis",
    model=emotion_model_path,
    tokenizer=emotion_model_path
)
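# Note: the exact emotion labels come from the model's config (IndoNLU EmoT-style
# classifiers typically use anger, fear, happy, love, sadness). The code below only
# passes the label through, so it works regardless of the exact label set.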
# Function to query Groq API for suggestions or feedback on the comment in Indonesian
def get_llm_suggestions(comment, bullying_result, emotion_label):
    # Use the Groq API to get feedback or suggestions
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": (
                    f"Beri saran atau umpan balik untuk meningkatkan komentar berikut dalam bahasa Indonesia: "
                    f"'{comment}' jika terdeteksi '{bullying_result}' dengan emosi yang dideteksi sebagai "
                    f"'{emotion_label}'. Berikan peringatan jika komentar terlalu kasar."
                )
            }
        ],
        model="gemma2-9b-it"  # You can replace this with any other model you'd like to use
    )
    response = chat_completion.choices[0].message.content
    return response
# Define the function for Gradio interface
def detect_bullying_and_emotion(comment):
    # Tokenize the input text for bullying detection (truncate to the model's max length
    # so very long comments don't raise an error)
    bullying_inputs = bullying_tokenizer(comment, return_tensors="pt", truncation=True, max_length=512)
    # Make the bullying detection prediction
    with torch.no_grad():
        bullying_outputs = bullying_model(**bullying_inputs)
        bullying_predicted_class = torch.argmax(bullying_outputs.logits, dim=-1).item()
    # Make the emotion detection prediction using the pipeline
    emotion_result = emotion_pipeline(comment)
    # Interpret the bullying result (class 1 = bullying, class 0 = non-bullying)
    if bullying_predicted_class == 1:
        bullying_result = "Bullying"
        feedback_message = "Apakah Anda yakin ingin mengirim komentar ini? Komentar ini terdeteksi sebagai bullying."
    else:
        bullying_result = "Non-bullying"
        feedback_message = "Komentar ini aman untuk dikirim."
    # Emotion result (label and confidence)
    emotion_label = emotion_result[0]['label']
    emotion_score = emotion_result[0]['score']
    # Get LLM suggestions for improving the comment
    llm_suggestions = get_llm_suggestions(comment, bullying_result, emotion_label)
    # Return the results along with the feedback message and suggestions
    return bullying_result, f"{emotion_label} (Confidence Score: {emotion_score:.2f})", feedback_message, llm_suggestions
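# Quick local sanity check (illustrative only; actual labels, scores, and LLM text
# depend on the models and the API response):
#   bullying, emotion, feedback, suggestions = detect_bullying_and_emotion("Kamu hebat!")
#   print(bullying, emotion, feedback)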
# Create Gradio interface
iface = gr.Interface(
    fn=detect_bullying_and_emotion,  # The function that processes the input
    inputs=gr.Textbox(label="Masukkan komentar Anda", lines=4),  # Text input for the comment
    outputs=[
        gr.Textbox(label="Deteksi Bullying"),    # Output 1: Bullying result
        gr.Textbox(label="Deteksi Emosi"),       # Output 2: Emotion detection
        gr.Textbox(label="Feedback"),            # Output 3: Feedback message
        gr.Textbox(label="Saran Perbaikan AI")   # Output 4: LLM suggestions
    ],
    live=False,  # Don't analyze live, only after submit
    title="Sistem Evaluasi Komentar",  # Title of the interface
    description="Masukkan komentar untuk memeriksa bullying, deteksi emosi, dan saran perbaikan."  # Description
)
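# When running locally (outside Spaces), iface.launch(share=True) can be used instead
# to get a temporary public URL; on Hugging Face Spaces the default launch() is enough.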
# Launch the interface
iface.launch()