import streamlit as st
# Auto* classes and pipeline are what the code below actually uses; the
# original fine-tuning imports (Trainer, Dataset, etc.) were unused here
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
# Model identifiers on the Hugging Face Hub: an English sentiment model and a
# Spanish political-tweet toxicity model
model_one = "distilbert-base-uncased-finetuned-sst-2-english"
model_two = "Newtral/xlm-r-finetuned-toxic-political-tweets-es"
def toxicRating(text, model_name):
    # Load the selected classification model and its matching tokenizer
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Classify the text; the pipeline returns a list of {'label', 'score'} dicts.
    # Note: this reloads the model on every call; caching the pipeline
    # (e.g. with st.cache_resource) would avoid that in a long-running app.
    classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
    results = classifier(text)
    return results
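# Example (output shape per the transformers pipeline API; the score value
# here is illustrative, not from a real run):
#   toxicRating("what a great day!", model_one)
#   -> [{'label': 'POSITIVE', 'score': 0.99}]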
def main():
    st.title("TOXIC TWEETS, \n TOXIC OR NOT?")
    st.header("Select Model")
    selection = st.radio("Models", ('Model 1', 'Model 2'))
    tweet = st.text_area("Enter Tweet: ")
    if tweet:
        # Rate the tweet with whichever model the user selected
        if selection == 'Model 1':
            rating = toxicRating(tweet, model_one)
        else:
            rating = toxicRating(tweet, model_two)
        # The pipeline returns a list with one result dict per input text
        st.write(f"Label: {rating[0]['label']} \n Score: {rating[0]['score']}")
    else:
        st.warning("Enter Tweet")
if __name__ == "__main__":
    main()
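# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py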