import streamlit as st
from transformers import pipeline
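
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py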

# Load models

# Distilled Sentiment Classifier
# Link: https://huggingface.co/lxyuan/distilbert-base-multilingual-cased-sentiments-student
distilled_sentiment_classifier = pipeline(
    model="lxyuan/distilbert-base-multilingual-cased-sentiments-student", 
    return_all_scores=True
)
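# return_all_scores=True yields a score for every label, nested as one list of
# {label, score} dicts per input string. (Newer transformers releases deprecate
# this flag in favor of top_k=None.)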

# Emotion Classifier
# Link: https://huggingface.co/SamLowe/roberta-base-go_emotions
emotion_text_classifier = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions")
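# By default only the top-scoring label is returned; passing top_k=None would
# surface scores for all 28 go_emotions labels instead.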

# Named Entity Recognition
# Link: https://huggingface.co/mdarhri00/named-entity-recognition
named_entity_classifier = pipeline(
    "token-classification",
    model="mdarhri00/named-entity-recognition",
    aggregation_strategy="simple",  # merge sub-word tokens so results carry "entity_group"
)

# Toxicity Classifier
# Link: https://huggingface.co/s-nlp/roberta_toxicity_classifier
toxicity_classifier = pipeline("text-classification", model="s-nlp/roberta_toxicity_classifier")
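# (This model labels text as "neutral" or "toxic" with a confidence score.)

# Note: Streamlit re-runs this script top-to-bottom on every widget interaction,
# so the pipelines above are rebuilt on each rerun. A minimal sketch of the usual
# fix, assuming Streamlit >= 1.18 where st.cache_resource is available: wrap a
# load in a cached helper so the model is built once per process. The helper name
# below is hypothetical and main() does not call it; it only illustrates the pattern.
@st.cache_resource
def load_cached_toxicity_classifier():
    # Streamlit caches the return value; reruns reuse the same pipeline object.
    return pipeline("text-classification", model="s-nlp/roberta_toxicity_classifier")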

# Streamlit app
def main():
    st.title("HuggingFace Model Demo App")

    # User input for text
    user_text = st.text_area("Enter some text:")

    if user_text:
        # Available Models
        analysis_type = st.selectbox("Select Analysis Type", ["", "Sentiment Analysis", "Emotion Analysis", "Named Entity Recognition", "Toxicity Analysis"])
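        # The leading empty option keeps the selectbox blank until the user picks
        # an analysis, so none of the branches below runs by default.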
        
        # Run the selected analysis and display its output
        st.header("Function Output:")

        if analysis_type == "Sentiment Analysis":
            st.subheader("Sentiment Analysis:")

            # Parse JSON data
            data = distilled_sentiment_classifier(user_text)

            # Extract and display label and score for each entry
            for labels_and_scores in data:
                for entry in labels_and_scores:
                    label = entry["label"]
                    score = entry["score"]
                    st.write(f"Label: {label}, Score: {score}")

            # Find the label with the highest score (data nests one list of
            # label/score dicts per input string, hence data[0])
            max_score_entry = max(data[0], key=lambda x: x["score"])
            max_score_label = max_score_entry["label"]
            max_score = max_score_entry["score"]
            
            # Display the label with the highest score
            st.write(f"Label with the highest score: {max_score_label}, Highest Score: {max_score}")

    
        if analysis_type == "Emotion Analysis":    
            st.subheader("Emotion Analysis:")
            
            # Parse JSON data
            data = emotion_text_classifier(user_text)
            
            # Extract and display label and score for each entry
            for entry in data:
                label = entry["label"]
                score = entry["score"]
                st.write(f"Label: {label}, Score: {score}")

        
        if analysis_type == "Named Entity Recognition":
            st.subheader("Named Entity Recognition:")
            
            # Parse JSON data
            data = named_entity_classifier(user_text)
            # st.write(data)  # uncomment to inspect the raw pipeline output
            # Extract and display data (a flat list of entity dicts for a single input)
            for entry in data:
                entity_group = entry["entity_group"]
                score = entry["score"]
                word = entry["word"]
                start = entry["start"]
                end = entry["end"]
                st.write(f"Word: {word}, Entity Group: {entity_group}, Score: {score}, Start: {start}, End: {end}")

        
        if analysis_type == "Toxicity Analysis":
            st.subheader("Toxicity Analysis:")
            
            # Parse JSON data
            data = toxicity_classifier(user_text)
            #st.write(data)
            # Extract and display label and score values
            for entry in data:
                label = entry["label"]
                score = entry["score"]
                st.write(f"Label: {label}, Score: {score}")

if __name__ == "__main__":
    main()