# Streamlit demo: sentence classification with the "isom5240/CustomModel_yelp2024fall" model.
# (NOTE: the "Spaces: Sleeping" lines here were HuggingFace Spaces page-scrape junk, not code.)
import numpy as np
import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import pipeline
@st.cache_resource
def _load_classifier():
    """Load the classification model and tokenizer once and cache them.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, `from_pretrained` would re-load (and potentially
    re-download) the model on each keystroke submission.

    Returns:
        tuple: (model, tokenizer) ready for inference.
    """
    model = AutoModelForSequenceClassification.from_pretrained(
        "isom5240/CustomModel_yelp2024fall",
        num_labels=5,
    )
    # NOTE(review): tokenizer is the distilbert base tokenizer, not one saved
    # alongside the custom model — presumably the model was fine-tuned from
    # distilbert-base-uncased; verify against the training setup.
    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    return model, tokenizer


def main():
    """Streamlit entry point.

    Renders a text box, runs the cached yelp2024fall sequence classifier on
    the entered sentence, and displays the index of the highest-probability
    label (0-4, i.e. 5 classes per `num_labels=5`).
    """
    st.title("yelp2024fall Test")
    st.write("Enter a sentence for analysis:")
    user_input = st.text_input("")
    if user_input:
        # Approach: AutoModel
        model2, tokenizer = _load_classifier()
        inputs = tokenizer(
            user_input,
            padding=True,
            truncation=True,
            return_tensors='pt',
        )
        # Inference only: no_grad avoids building the autograd graph.
        with torch.no_grad():
            outputs = model2(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
        predictions = predictions.cpu().detach().numpy()
        # Get the index of the largest output value
        max_index = np.argmax(predictions)
        st.write(f"result (AutoModel) - Label: {max_index}")


if __name__ == "__main__":
    main()
# --- Previous version of this Space (image -> story -> audio pipeline), kept for reference ---
# import streamlit as st
# from transformers import pipeline
#
# # img2text
# def img2text(url):
#     image_to_text_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
#     text = image_to_text_model(url)[0]["generated_text"]
#     print(text)
#     return text
#
# # txt2Story
# def txt2story(text):
#     pipe = pipeline("text-generation", model="pranavpsv/genre-story-generator-v2")
#     story_txt = pipe(text)[0]['generated_text']
#     print(story_txt)
#     return story_txt
#
# # Story2Audio
# def text2audio(story_text):
#     pipe = pipeline("text-to-audio", model="Matthijs/mms-tts-eng")
#     audio_data = pipe(story_text)
#     return audio_data
#
# def main():
#     st.set_page_config(page_title="Your Image to Audio Story", page_icon="π¦")
#     st.header("Turn Your Image to Audio Story")
#     uploaded_file = st.file_uploader("Select an Image...")
#     if uploaded_file is not None:
#         print(uploaded_file)
#         bytes_data = uploaded_file.getvalue()
#         with open(uploaded_file.name, "wb") as file:
#             file.write(bytes_data)
#         st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
#         # Stage 1: Image to Text
#         st.text('Processing img2text...')
#         scenario = img2text(uploaded_file.name)
#         st.write(scenario)
#         # Stage 2: Text to Story
#         st.text('Generating a story...')
#         story = txt2story(scenario)
#         st.write(story)
#         # Stage 3: Story to Audio data
#         st.text('Generating audio data...')
#         audio_data = text2audio(story)
#         # Play button
#         if st.button("Play Audio"):
#             st.audio(audio_data['audio'],
#                      format="audio/wav",
#                      start_time=0,
#                      sample_rate=audio_data['sampling_rate'])
#
# if __name__ == "__main__":
#     main()