import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the pretrained Japanese sentiment model and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("jarvisx17/japanese-sentiment-analysis")
model = AutoModelForSequenceClassification.from_pretrained("jarvisx17/japanese-sentiment-analysis")

classes = ["negative", "positive"]


def predict(sequence):
    # Tokenize the input text and run it through the model (no gradients needed at inference).
    inputs = tokenizer(sequence, return_tensors="pt")
    with torch.no_grad():
        classification_logits = model(**inputs).logits
    # Convert the logits into class probabilities.
    results = torch.softmax(classification_logits, dim=1).tolist()[0]
    # Report each class as a rounded percentage, one per line.
    output = classes[0] + ":" + str(int(round(results[0] * 100))) + "\n"
    output += classes[1] + ":" + str(int(round(results[1] * 100)))
    return output


# Expose the classifier as a simple text-in, text-out Gradio demo.
gr.Interface(fn=predict, inputs="text", outputs="text").launch()
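
# A minimal sanity check you might run before (or instead of) launch(); the example
# sentence below is hypothetical, and the exact percentages depend on the model's output:
#
#     print(predict("この映画はとても面白かったです。"))
#     # negative:<some percentage>
#     # positive:<some percentage>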