|
import gradio as gr |
|
from transformers import pipeline, BertTokenizer, AutoModelForSequenceClassification |
|
import torch |
|
import os |
|
from groq import Groq |
|
|
|
|
|
# Read the Groq API key from the environment; on Hugging Face Spaces this is
# injected from the repository's "Secrets" settings.
api_key = os.getenv("GROQ_API_KEY")

# Fail fast at startup with an actionable message rather than erroring later
# on the first LLM request.
if not api_key:

    raise ValueError("GROQ_API_KEY is missing. Please add it as a secret in Hugging Face Spaces.")




# Groq client used to generate comment-improvement suggestions via an LLM.
client = Groq(api_key=api_key)
|
|
|
|
|
# --- Bullying classifier: IndoBERT fine-tuned for bullying detection. ---
# Weights are downloaded from the Hugging Face Hub at import time.
bullying_model_path = 'kairaamilanii/IndoBERT-Bullying-Classifier'

bullying_tokenizer = BertTokenizer.from_pretrained(bullying_model_path)

bullying_model = AutoModelForSequenceClassification.from_pretrained(bullying_model_path)




# --- Emotion classifier: Indonesian RoBERTa, wrapped in a HF pipeline. ---
# "sentiment-analysis" is an alias for the text-classification pipeline, so
# it returns [{'label': ..., 'score': ...}] per input.
emotion_model_path = "StevenLimcorn/indonesian-roberta-base-emotion-classifier"

emotion_pipeline = pipeline(

    "sentiment-analysis",

    model=emotion_model_path,

    tokenizer=emotion_model_path

)
|
|
|
|
|
def get_llm_suggestions(comment, bullying_result, emotion_label):
    """Ask the Groq-hosted LLM for Indonesian-language feedback on a comment.

    Args:
        comment: Raw user comment text.
        bullying_result: Label from the bullying classifier
            ("Bullying" or "Non-bullying").
        emotion_label: Label from the emotion classifier.

    Returns:
        The model's suggestion text as a string.
    """
    # Single-turn user prompt; wording is part of the app's behavior and is
    # kept exactly as specified.
    prompt = (
        f"Beri saran atau umpan balik untuk meningkatkan komentar berikut dalam bahasa Indonesia: "
        f"'{comment}' jika terdeteksi '{bullying_result}' dengan emosi yang dideteksi sebagai "
        f"'{emotion_label}'. Berikan peringatan jika komentar terlalu kasar."
    )

    completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="gemma2-9b-it",
    )

    # The API returns a list of choices; only the first is used.
    return completion.choices[0].message.content
|
|
|
|
|
def detect_bullying_and_emotion(comment):
    """Run bullying and emotion classification on a comment, plus LLM feedback.

    Args:
        comment: User comment text (Indonesian expected by both models).

    Returns:
        A 4-tuple matching the Gradio output textboxes:
        (bullying label, "emotion (Confidence Score: x.xx)" string,
         feedback message, LLM improvement suggestions).
    """
    # FIX: truncate to the model's maximum sequence length. Without
    # truncation, a comment longer than 512 tokens raises a runtime error
    # inside the BERT forward pass (position embeddings overflow).
    bullying_inputs = bullying_tokenizer(
        comment, return_tensors="pt", truncation=True, max_length=512
    )

    # Inference only — disable gradient tracking.
    with torch.no_grad():
        bullying_outputs = bullying_model(**bullying_inputs)
        bullying_predicted_class = torch.argmax(bullying_outputs.logits, dim=-1).item()

    # The pipeline tokenizes internally; pass truncation for the same reason.
    emotion_result = emotion_pipeline(comment, truncation=True)

    # Class 1 is the "bullying" label for this checkpoint.
    if bullying_predicted_class == 1:
        bullying_result = "Bullying"
        feedback_message = "Apakah Anda yakin ingin mengirim komentar ini? Komentar ini terdeteksi sebagai bullying."
    else:
        bullying_result = "Non-bullying"
        feedback_message = "Komentar ini aman untuk dikirim."

    emotion_label = emotion_result[0]['label']
    emotion_score = emotion_result[0]['score']

    llm_suggestions = get_llm_suggestions(comment, bullying_result, emotion_label)

    return bullying_result, f"{emotion_label} (Confidence Score: {emotion_score:.2f})", feedback_message, llm_suggestions
|
|
|
|
|
# Gradio UI: one input textbox, and one output textbox per value returned by
# detect_bullying_and_emotion (order must match the function's return tuple).
output_boxes = [
    gr.Textbox(label="Deteksi Bullying"),
    gr.Textbox(label="Deteksi Emosi"),
    gr.Textbox(label="Feedback"),
    gr.Textbox(label="Saran Perbaikan AI"),
]

iface = gr.Interface(
    fn=detect_bullying_and_emotion,
    inputs=gr.Textbox(label="Masukkan komentar Anda", lines=4),
    outputs=output_boxes,
    live=False,  # run only on submit, not on every keystroke
    title="Sistem Evaluasi Komentar",
    description="Masukkan komentar untuk memeriksa bullying, deteksi emosi, dan saran perbaikan.",
)

iface.launch()
|
|