from transformers import AutoTokenizer, AutoModelForSequenceClassification
import streamlit as st
import torch

# Load tokenizer and model from Hugging Face
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

# Streamlit UI setup
st.title("Sentiment Analysis App using GenAI Models")

# Text input from the user
user_input = st.text_area("Enter text to analyze sentiment:")

# Prediction button
if st.button("Analyze"):
    if user_input:
        # Tokenize the user input
        inputs = tokenizer(user_input, return_tensors="pt")

        # Perform inference
        with torch.no_grad():
            outputs = model(**inputs)

        # Interpret the results
        predicted_class = torch.argmax(outputs.logits, dim=1).item()
        sentiment = ["Negative", "Neutral", "Positive"][predicted_class]  # Assuming 3 classes
        st.write(f"**Predicted Sentiment:** {sentiment}")
    else:
        st.warning("Please enter some text to analyze.")
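To try the app locally, save the script under a filename of your choice (for example `app.py`, used here purely for illustration) and start it with `streamlit run app.py`. Streamlit opens the UI in the browser, and on the first run the tokenizer and model weights are downloaded from the Hugging Face Hub, so an internet connection is needed for that initial launch.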