import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Set up the device (GPU if available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Streamlit app
def main():
    st.title("Sentiment Analysis App")
    st.write("Enter a text and select a pretrained model to perform sentiment analysis.")

    text = st.text_area("Enter text", value="")

    # Model checkpoints mapped to the display names shown in the dropdown.
    # Note: only the SST-2 checkpoint is fine-tuned for sentiment; the other
    # checkpoints get a randomly initialized classification head, so their
    # predictions are not meaningful without further fine-tuning.
    model_options = {
        "distilbert-base-uncased-finetuned-sst-2-english": "DistilBERT (SST-2)",
        "distilbert-base-uncased": "DistilBERT Uncased",
        "roberta-base": "RoBERTa Base",
        "albert-base-v2": "ALBERT Base v2",
        # Can add more models here if desired
    }

    # Load the selected pretrained model and tokenizer, and move the model to the device
    model_name = st.selectbox(
        "Select a pretrained model",
        list(model_options.keys()),
        format_func=lambda name: model_options[name],
    )
    model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    if st.button("Analyze"):
        # Tokenize the input and move it to the same device as the model
        inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt")
        inputs = inputs.to(device)

        # Perform sentiment analysis without tracking gradients
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=1).cpu().numpy()[0]
        sentiment_label = "Positive" if probabilities[1] > probabilities[0] else "Negative"

        st.write(f"Sentiment: {sentiment_label}")
        st.write(f"Positive probability: {probabilities[1]}")
        st.write(f"Negative probability: {probabilities[0]}")


if __name__ == "__main__":
    main()
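Assuming the script is saved as app.py (a filename chosen here purely for illustration), it can be launched locally with "streamlit run app.py"; Streamlit then serves the interface in the browser and reruns the script on each interaction, so the selected model is reloaded whenever the dropdown changes.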