import streamlit as st
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer

@st.cache_resource
def load_model():
    # Cache the model and tokenizer so Streamlit does not reload them
    # from the Hub on every rerun of the script.
    model = AutoModelForSequenceClassification.from_pretrained(
        "isom5240/CustomModel_yelp2024fall", num_labels=5)
    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    return model, tokenizer


def main():
    st.title("yelp2024fall Test")

    user_input = st.text_input("Enter a sentence for analysis:")
    if user_input:
        # Approach: AutoModel
        model2, tokenizer = load_model()

        inputs = tokenizer(user_input,
                           padding=True,
                           truncation=True,
                           return_tensors='pt')

        # Run inference without tracking gradients, then turn the raw
        # logits into a probability distribution over the 5 star labels.
        with torch.no_grad():
            outputs = model2(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
        predictions = predictions.cpu().numpy()

        # The index of the largest probability is the predicted label (0-4).
        max_index = np.argmax(predictions)
        st.write(f"result (AutoModel) - Label: {max_index}")


if __name__ == "__main__":
    main()
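
# Alternative approach (sketch, not used above): the same classification could
# likely be done with the high-level transformers pipeline API, assuming the
# hosted checkpoint is compatible with it (`from transformers import pipeline`
# would be needed):
#
# clf = pipeline("text-classification",
#                model="isom5240/CustomModel_yelp2024fall",
#                tokenizer="distilbert-base-uncased")
# result = clf(user_input)[0]   # e.g. {'label': 'LABEL_3', 'score': 0.87}
# st.write(f"result (pipeline) - {result['label']} ({result['score']:.2f})")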



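# Legacy prototype, kept commented out for reference: a three-stage demo that
# captions an uploaded image, turns the caption into a short story, and reads
# the story aloud (image-to-text -> text-generation -> text-to-audio).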
# import streamlit as st
# from transformers import pipeline

# # img2text
# def img2text(url):
#     image_to_text_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
#     text = image_to_text_model(url)[0]["generated_text"]

#     print(text)
#     return text

# # txt2Story
# def txt2story(text):
#     pipe = pipeline("text-generation", model="pranavpsv/genre-story-generator-v2")
#     story_txt = pipe(text)[0]['generated_text']

#     print(story_txt)
#     return story_txt

# # Story2Audio
# def text2audio(story_text):
#     pipe = pipeline("text-to-audio", model="Matthijs/mms-tts-eng")
#     audio_data = pipe(story_text)
#     return audio_data


# def main():
#     st.set_page_config(page_title="Your Image to Audio Story", page_icon="🦜")
#     st.header("Turn Your Image to Audio Story")
#     uploaded_file = st.file_uploader("Select an Image...")

#     if uploaded_file is not None:
#         print(uploaded_file)
#         bytes_data = uploaded_file.getvalue()
#         with open(uploaded_file.name, "wb") as file:
#             file.write(bytes_data)
#         st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)


#         # Stage 1: Image to Text
#         st.text('Processing img2text...')
#         scenario = img2text(uploaded_file.name)
#         st.write(scenario)

#         # Stage 2: Text to Story
#         st.text('Generating a story...')
#         story = txt2story(scenario)
#         st.write(story)

#         # Stage 3: Story to Audio data
#         st.text('Generating audio data...')
#         audio_data = text2audio(story)

#         # Play button
#         if st.button("Play Audio"):
#             st.audio(audio_data['audio'],
#                      format="audio/wav",
#                      start_time=0,
#                  sample_rate=audio_data['sampling_rate'])


# if __name__ == "__main__":
#     main()