import os
from fastai.vision.all import *
import gradio as gr
# Load the two exported fastai learners: one for emotion, one for sentiment
learn_emotion = load_learner('emotions_jey.pkl')
learn_emotion_labels = learn_emotion.dls.vocab
learn_sentiment = load_learner('sentiment_jey.pkl')
learn_sentiment_labels = learn_sentiment.dls.vocab
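# Optional sanity check: both vocabularies must match the keys of label_mapping
# below, otherwise the dict comprehensions in predict() raise a KeyError.
# print(learn_emotion_labels, learn_sentiment_labels)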
# Map the English model labels to the Spanish labels shown in the UI
label_mapping = {
    'angry': 'enojado',
    'disgust': 'asco',
    'fear': 'miedo',
    'happy': 'feliz',
    'sad': 'triste',
    'surprise': 'sorpresa',
    'neutral': 'neutral',
    'negative': 'negativo',
    'positive': 'positivo'
}
# Prediction function: returns {Spanish label: probability} dicts for both models
def predict(img_path):
    img = PILImage.create(img_path)
    # Each fastai predict() call returns (decoded_label, label_index, probabilities)
    pred_emotion, pred_emotion_idx, probs_emotion = learn_emotion.predict(img)
    pred_sentiment, pred_sentiment_idx, probs_sentiment = learn_sentiment.predict(img)
    # Build the dicts consumed by the two gr.Label outputs
    emotions = {label_mapping[label]: float(prob) for label, prob in zip(learn_emotion_labels, probs_emotion)}
    sentiments = {label_mapping[label]: float(prob) for label, prob in zip(learn_sentiment_labels, probs_sentiment)}
    return emotions, sentiments
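# Quick manual check (a sketch; assumes one of the example images below, e.g.
# 'happy1.jpg', sits next to this script). Uncomment to exercise the models
# without launching the Gradio app:
# emotions, sentiments = predict('happy1.jpg')
# print(max(emotions, key=emotions.get), max(sentiments, key=sentiments.get))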
# Gradio interface (titles and descriptions shown to the user)
title = "Detector de emociones y sentimientos faciales"
description = (
    "Esta interfaz utiliza redes neuronales para detectar emociones y sentimientos a partir de imágenes faciales."
)
article = "Esta herramienta proporciona una forma rápida de analizar emociones y sentimientos en imágenes."
examples = [
    'PrivateTest_10131363.jpg',
    'angry1.png',
    'angry2.jpg',
    'happy1.jpg',
    'happy2.jpg',
    'neutral1.jpg',
    'neutral2.jpg'
]
# Note: shape= on gr.Image and enable_queue= on launch() are Gradio 3.x API;
# both arguments were removed in Gradio 4.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(shape=(48, 48), image_mode='L'),  # resize to 48x48 grayscale
    outputs=[gr.Label(label='Emoción'), gr.Label(label='Sentimiento')],
    title=title,
    examples=examples,
    description=description,
    article=article,
    allow_flagging='never'
)
iface.launch(enable_queue=True)
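# Equivalent setup for Gradio 4+ (a sketch under that assumption, not part of
# the original script): drop shape= and enable_queue=, pass the image as PIL,
# and call queue() explicitly.
# iface = gr.Interface(
#     fn=predict,
#     inputs=gr.Image(image_mode='L', type='pil'),
#     outputs=[gr.Label(label='Emoción'), gr.Label(label='Sentimiento')],
#     title=title, examples=examples, description=description,
#     article=article, allow_flagging='never'
# )
# iface.queue().launch()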
################# Variant without the Spanish label mapping (labels shown in English)
import os
from fastai.vision.all import *
import gradio as gr
# Load the two exported fastai learners: one for emotion, one for sentiment
learn_emotion = load_learner('emotions_jey.pkl')
learn_emotion_labels = learn_emotion.dls.vocab
learn_sentiment = load_learner('sentiment_jey.pkl')
learn_sentiment_labels = learn_sentiment.dls.vocab
# Prediction function: returns {label: probability} dicts using the models' English labels
def predict(img_path):
    img = PILImage.create(img_path)
    pred_emotion, pred_emotion_idx, probs_emotion = learn_emotion.predict(img)
    pred_sentiment, pred_sentiment_idx, probs_sentiment = learn_sentiment.predict(img)
    emotions = {label: float(prob) for label, prob in zip(learn_emotion_labels, probs_emotion)}
    sentiments = {label: float(prob) for label, prob in zip(learn_sentiment_labels, probs_sentiment)}
    return emotions, sentiments
# Gradio interface (titles and descriptions shown to the user)
title = "Detector de emociones y sentimientos faciales"
description = (
    "Esta interfaz utiliza redes neuronales para detectar emociones y sentimientos a partir de imágenes faciales."
)
article = "Esta herramienta proporciona una forma rápida de analizar emociones y sentimientos en imágenes."
examples = [
    'PrivateTest_10131363.jpg',
    'angry1.png',
    'angry2.jpg',
    'happy1.jpg',
    'happy2.jpg',
    'neutral1.jpg',
    'neutral2.jpg'
]
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(shape=(48, 48), image_mode='L'),
    outputs=[gr.Label(label='Emotion'), gr.Label(label='Sentiment')],
    title=title,
    examples=examples,
    description=description,
    article=article,
    allow_flagging='never'
)
iface.launch(enable_queue=True)