maxspad committed
Commit 3d4b0bc
1 Parent(s): 1dde253

examples working

Files changed (3)
  1. app.py +25 -14
  2. requirements.txt +2 -1
  3. test.pkl +3 -0
app.py CHANGED
@@ -2,6 +2,8 @@ import streamlit as st
 import transformers as tf
 import plotly.graph_objects as go
 import matplotlib.cm as cm
+import pandas as pd
+
 
 # Function to load and cache models
 @st.experimental_singleton(show_spinner=False)
@@ -9,6 +11,10 @@ def load_model(username, prefix, model_name):
     p = tf.pipeline('text-classification', f'{username}/{prefix}-{model_name}')
     return p
 
+@st.experimental_singleton(show_spinner=False)
+def load_pickle(f):
+    return pd.read_pickle(f)
+
 def get_results(model, c):
     res = model(c)[0]
     label = float(res['label'].split('_')[1])
@@ -36,7 +42,6 @@ PREFIX = 'nlp-qual'
 models_to_load = ['qual', 'q1', 'q2i', 'q3i']
 n_models = float(len(models_to_load))
 models = {}
-
 # Show a progress bar while models are downloading,
 # then hide it when done
 lc_placeholder = st.empty()
@@ -48,22 +53,36 @@ for i, mn in enumerate(models_to_load):
     models[mn] = load_model(USERNAME, PREFIX, mn)
 lc_placeholder.empty()
 
+### Load example data
+examples = load_pickle('test.pkl')
 
 ### Process input
+ex = examples['comment'].sample(1).tolist()[0]
+if 'comment' not in st.session_state:
+    st.session_state['comment'] = ex
 with st.form('comment_form'):
-    comment = st.text_area('Try a comment:')
+    comment = st.text_area('Try a comment:', value=st.session_state['comment'])
     left_col, right_col = st.columns([1,9], gap='medium')
     submitted = left_col.form_submit_button('Submit')
-    try_example = right_col.form_submit_button('Try an example!')
-
-results = run_models(models_to_load, models, comment)
+    trying_example = right_col.form_submit_button('Try an example!')
+
+if submitted:
+    st.session_state['button_clicked'] = 'submit'
+    st.session_state['comment'] = comment
+    st.experimental_rerun()
+elif trying_example:
+    st.session_state['button_clicked'] = 'example'
+    st.session_state['comment'] = ex
+    st.experimental_rerun()
+
+results = run_models(models_to_load, models, st.session_state['comment'])
 
 tab_titles = ['Overview', 'Q1 - Level of Detail', 'Q2 - Suggestion Given', 'Q3 - Suggestion Linked']
 tabs = st.tabs(tab_titles)
 
 
 with tabs[0]:
-    with st.expander('What is the QuAL score?'):
+    with st.expander('About the QuAL score?'):
         st.markdown('**The best thing ever**!')
     cmap = cm.get_cmap('RdYlGn')
     color = cmap(results['qual']['label'] / 6.0)
@@ -74,16 +93,8 @@ with tabs[0]:
         value = results['qual']['label'],
         mode = "gauge+number",
         title = {'text': "QuAL"},
-        # delta = {'reference': 380},
         gauge = {'axis': {'range': [None, 5]},
                  'bgcolor': 'lightgray',
-                 # 'steps': [
-                 #     {'range': [0,1], 'color': "rgb(215,48,39)"},
-                 #     {'range': [1,2], 'color': "rgb(244,109,67)"},
-                 #     {'range': [2,3], 'color': "rgb(254,224,139)"},
-                 #     {'range': [3,4], 'color': "rgb(102,189,99)"},
-                 #     {'range': [4,5], 'color': "rgb(0,104,55)"}
-                 # ],
                  'bar': {'color': color, 'thickness': 1.0},
 
                  }
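The "Try an example!" flow added above relies on Streamlit's form / session-state / rerun pattern: a comment sampled from the example data is written to st.session_state and st.experimental_rerun() restarts the script so the text area picks up the new value. Below is a minimal, self-contained sketch of that pattern, not the app's actual code: the EXAMPLES list, random.choice sampling, and the final st.write are stand-ins for the pickled DataFrame and downstream model calls.

```python
import random
import streamlit as st

# Hypothetical stand-in for the sampled 'comment' column of test.pkl
EXAMPLES = [
    "Great presentation, but consider adding more detail next time.",
    "Solid differential; work on committing to a single plan.",
]

# Seed the text area once; later reruns reuse whatever is stored here
if 'comment' not in st.session_state:
    st.session_state['comment'] = random.choice(EXAMPLES)

with st.form('comment_form'):
    comment = st.text_area('Try a comment:', value=st.session_state['comment'])
    submitted = st.form_submit_button('Submit')
    trying_example = st.form_submit_button('Try an example!')

if submitted:
    # Keep what the user typed and rerun so the widget reflects it
    st.session_state['comment'] = comment
    st.experimental_rerun()
elif trying_example:
    # Swap in a fresh example and rerun to refresh the text area
    st.session_state['comment'] = random.choice(EXAMPLES)
    st.experimental_rerun()

st.write('Current comment:', st.session_state['comment'])
```

st.experimental_rerun and st.experimental_singleton are the API names from the Streamlit generation this commit targets; newer releases expose the same behavior as st.rerun and st.cache_resource.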
requirements.txt CHANGED
@@ -2,4 +2,5 @@ torch
 torchvision
 torchaudio
 transformers
-plotly==5.11.0
+plotly==5.11.0
+pandas
test.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fffa01055fbedf253065c9a12f02c0aa3599856b59c22ebaa09ca23404a9742b
3
+ size 10474780
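test.pkl is tracked with Git LFS, so only the pointer (oid and size) appears in the diff; the app reads the real file with pd.read_pickle and samples from its 'comment' column. For local testing without the ~10 MB LFS object, a stand-in with the same shape could be generated as sketched below; the column name comes from the diff, while the contents are made up.

```python
import pandas as pd

# Hypothetical stand-in for test.pkl: the app only appears to need a
# 'comment' column it can .sample(1) from (see examples['comment'] in app.py)
stand_in = pd.DataFrame({
    'comment': [
        "Great presentation, but consider adding more detail next time.",
        "Solid differential; work on committing to a single plan.",
        "Good rapport with the patient; documentation could be more thorough.",
    ]
})
stand_in.to_pickle('test.pkl')

# The app's load_pickle('test.pkl') would then return this DataFrame
print(pd.read_pickle('test.pkl')['comment'].sample(1).tolist()[0])
```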