Jainesh212 committed
Commit de71ace · 1 Parent(s): 567e725

Update app.py

Files changed (1)
  1. app.py +17 -20
app.py CHANGED
@@ -17,46 +17,43 @@ def classify_toxicity(text):
     score = result['score']
     return label, score
 
+
 # Define the Streamlit app
 def app():
     # Create a persistent DataFrame
     if 'results' not in st.session_state:
         st.session_state.results = pd.DataFrame(columns=['text', 'toxicity', 'score'])
-
+
     # Set page title and favicon
-    st.set_page_config(page_title='Toxicity Classifier', page_icon=':microscope:')
+    st.set_page_config(page_title='Toxicity Classification App', page_icon=':guardsman:')
 
-    # Set app title and description
-    st.title('Toxicity Classifier')
-    st.markdown('This app uses a pre-trained BERT model to classify text into three toxicity levels: **positive**, **neutral**, and **negative**.')
+    # Set app header
+    st.write('# Toxicity Classification App')
+    st.write('Enter some text and the app will classify its toxicity.')
 
     # Create a form for users to enter their text
     with st.form(key='text_form'):
-        text_input = st.text_input(label='Enter your text:', help='Enter some text to classify.')
+        text_input = st.text_input(label='Enter your text:')
         submit_button = st.form_submit_button(label='Classify')
 
     # Classify the text and display the results
     if submit_button and text_input != '':
         label, score = classify_toxicity(text_input)
-
-        # Create a collapsible section for the classification result
-        with st.beta_expander('Classification Result', expanded=True):
-            st.write(f'Text: {text_input}')
-            st.write(f'Toxicity: {label}')
-            st.write(f'Score: {score:.4f}')
+        st.write('## Classification Result')
+        st.write(f'**Text:** {text_input}')
+        st.write(f'**Toxicity:** {label}')
+        st.write(f'**Score:** {score:.2f}')
 
         # Add the classification result to the persistent DataFrame
         st.session_state.results = st.session_state.results.append({'text': text_input, 'toxicity': label, 'score': score}, ignore_index=True)
 
     # Display the persistent DataFrame
-    if not st.session_state.results.empty:
-        st.markdown('## Classification Results')
-        st.dataframe(st.session_state.results.style.highlight_max(subset=['score'], color='lightgreen'))
-
-    # Add a footer with some info and links
-    st.markdown('---')
-    st.markdown('Made with :heart: by ChatGPT')
-    st.markdown('[GitHub](https://github.com/chatgpt/toxicity-classifier) - [OpenAI Blog](https://openai.com/blog/) - [Streamlit Docs](https://docs.streamlit.io/)')
+    st.write('## Classification Results')
+    st.write(st.session_state.results)
+
+    # Display a chart of the classification results
+    chart_data = st.session_state.results.groupby('toxicity').size().reset_index(name='count')
+    chart = st.bar_chart(chart_data.set_index('toxicity'))
 
 if __name__ == '__main__':
     app()
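
The hunk starts at line 17 of app.py, so the imports and the opening of classify_toxicity sit outside the diff context above. For orientation only, a minimal sketch of what that elided setup typically looks like when built on the transformers text-classification pipeline; the checkpoint name is a placeholder assumption, not something visible in this commit.

# Sketch only: assumed setup preceding the hunk above; not part of the commit.
import streamlit as st
import pandas as pd
from transformers import pipeline

# Hypothetical checkpoint; the model actually used is outside the diff context.
classifier = pipeline('text-classification', model='unitary/toxic-bert')

def classify_toxicity(text):
    # The pipeline returns one dict per input, e.g. [{'label': 'toxic', 'score': 0.98}]
    result = classifier(text)[0]
    label = result['label']
    score = result['score']
    return label, score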
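
Both the old and the new version keep the context line st.session_state.results = st.session_state.results.append(...). DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0, so on a current pandas install that line raises AttributeError. A hedged drop-in replacement, assuming the same three columns and the same variable names as in the diff:

# Sketch of a pandas >= 2.0 safe alternative to the .append(...) call above; not part of the commit.
new_row = pd.DataFrame([{'text': text_input, 'toxicity': label, 'score': score}])
st.session_state.results = pd.concat([st.session_state.results, new_row], ignore_index=True)

pd.concat with ignore_index=True keeps the same running history across Streamlit reruns that the original append call produced.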