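# app.py for a Gradio Space that compares sentiment-analysis models from the
# Hugging Face Hub: it rates a review from 1 to 5 stars with a fine-tuned
# DistilBERT classifier, and looks up the reported accuracy of any two Hub
# models from the `model-index` metadata in their model cards.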
import gradio as gr
import requests.exceptions
from transformers import pipeline
from huggingface_hub import hf_hub_download
from huggingface_hub.repocard import metadata_load

app = gr.Blocks()

def load_agent(model_id_1, model_id_2):
    """
    Load the reported accuracy for two model repos on the Hub.
    """
    # Load each model card's metadata and extract its accuracy metric
    metadata_1 = get_metadata(model_id_1)
    results_1 = parse_metrics_accuracy(metadata_1)

    metadata_2 = get_metadata(model_id_2)
    results_2 = parse_metrics_accuracy(metadata_2)

    return model_id_1, results_1, model_id_2, results_2

def parse_metrics_accuracy(meta):
    # Repos whose README could not be fetched, or that carry no
    # `model-index` metadata, yield None
    if meta is None or "model-index" not in meta:
        return None
    # Read the first metric of the first reported result
    result = meta["model-index"][0]["results"]
    metrics = result[0]["metrics"]
    accuracy = metrics[0]["value"]
    return accuracy
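
# A model card's `model-index` metadata is expected to look roughly like this
# (a hypothetical sketch; real cards may report several results and metrics,
# of which the code above reads the first):
#
# model-index:
# - name: my-model
#   results:
#   - task:
#       type: text-classification
#     metrics:
#     - type: accuracy
#       value: 0.91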

def get_metadata(model_id):
    """
    Get the metadata of a model repo from its README.md.
    :param model_id: repo id on the Hugging Face Hub
    :return: the parsed metadata, or None if the README cannot be fetched
    """
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        return None

# Fine-tuned DistilBERT classifier that rates Amazon shoe reviews (1-5 stars)
classifier = pipeline("text-classification", model="juliensimon/distilbert-amazon-shoe-reviews")

def predict(review):
    prediction = classifier(review)
    # Labels are LABEL_0 ... LABEL_4, mapping to a 1-5 star rating
    stars = int(prediction[0]["label"].split("_")[1]) + 1
    score = 100 * prediction[0]["score"]
    return "{} {:.0f}%".format("\U00002B50" * stars, score)

with app:
    gr.Markdown(
    """
    # Compare Sentiment Analysis Models

    Type text to predict sentiment.
    """)
    with gr.Row():
        inp = gr.Textbox(label="Type text here.", placeholder="The customer service was satisfactory.")
        out = gr.Textbox(label="Prediction")
    btn = gr.Button("Run")
    btn.click(fn=predict, inputs=inp, outputs=out)
    
    gr.Markdown(
    """
    Type the IDs of two models you want to compare, or try the examples below.
    """)
    with gr.Row():
        model1_input = gr.Textbox(label="Model 1")
        model2_input = gr.Textbox(label="Model 2")
    with gr.Row():
        app_button = gr.Button("Compare models")
    with gr.Row():
        with gr.Column():
            model1_name = gr.Markdown()
            model1_score_output = gr.Textbox(label="Accuracy")
        with gr.Column():
            model2_name = gr.Markdown()
            model2_score_output = gr.Textbox(label="Accuracy")

    app_button.click(load_agent,
                     inputs=[model1_input, model2_input],
                     outputs=[model1_name, model1_score_output, model2_name, model2_score_output])
    
    examples = gr.Examples(examples=[["distilbert-base-uncased-finetuned-sst-2-english",
                                      "distilbert-base-uncased-finetuned-sst-2-english"]],
                           inputs=[model1_input, model2_input])


app.launch()