shawarmabytes committed
Commit 318df58
1 Parent(s): 09bef43

Update app.py

Files changed (1)
  app.py  +4 -8
app.py CHANGED
@@ -54,21 +54,17 @@ def tester(text):
 
  emo = st.text_input("Enter a text/phrase/sentence. A corresponding song will be recommended based on its emotion.")
 
-
-
-
- st.sidebar.write("The specific DistilBERT model used for this is Bhadresh Savani's [distilbert-base-uncased-emotion] (https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion). It is fine-tuned on the Emotion Dataset from Twitter, which can be found [here](https://huggingface.co/datasets/viewer/?dataset=emotion).")
+ st.sidebar.subheader("Disclaimer/Limitations")
+ st.sidebar.write("The model only outputs sadness, joy, love, anger, fear, and surprise. With that said, it does not completely encompass the emotions that a human being feels, and the application only suggests a playlist based on the aforementioned emotions.")
 
  st.sidebar.subheader("Model Description")
  st.sidebar.write("This application uses the DistilBERT model, a distilled version of BERT. The BERT framework uses a bidirectional transformer that allows it to learn the context of a word based on the left and right of the word. According to a paper by V. Sanh, et al., DistilBERT can \"reduce the size of a BERT model by 40%, while retaining 97% of its language understanding capabilities, and being 60% faster.\" This is why the DistilBERT model was used. For more information about the paper, please check out this [link](https://share.streamlit.io/mesmith027/streamlit_webapps/main/MC_pi/streamlit_app.py).")
+ st.sidebar.write("The specific DistilBERT model used for this is Bhadresh Savani's [distilbert-base-uncased-emotion] (https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion). It is fine-tuned on the Emotion Dataset from Twitter, which can be found [here](https://huggingface.co/datasets/viewer/?dataset=emotion).")
 
- st.sidebar.subheader("Disclaimer/Limitations")
- st.sidebar.write("The model only outputs sadness, joy, love, anger, fear, and surprise. With that said, it does not completely encompass the emotions that a human being feels, and the application only suggests a playlist based on the aforementioned emotions.")
-
- st.sidebar.subheader("Performance Benchmarks")
 
 
 
+ st.sidebar.subheader("Performance Benchmarks")
  st.sidebar.write("[Distilbert-base-uncased-emotion](https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion)")
  st.sidebar.write("Accuracy = 93.8")
  st.sidebar.write("F1 Score = 93.79")