maxspad commited on
Commit
a002819
β€’
1 Parent(s): 1bb3e7f

single tab only

Browse files
Files changed (3) hide show
  1. app.py +2 -9
  2. fullreport.py +0 -76
  3. plots.py +0 -32
app.py CHANGED
@@ -83,13 +83,6 @@ with st.form('comment_form'):
83
  st.experimental_rerun()
84
 
85
  results = run_models(models_to_load, models, st.session_state['comment'])
86
- # tab_titles = ['Overview', 'Q1 - Level of Detail', 'Q2 - Suggestion Given', 'Q3 - Suggestion Linked', 'About']
87
- tab_titles = ['Overview', 'Full Report']
88
- tabs = st.tabs(tab_titles)
89
 
90
- with tabs[0]:
91
- overview = NQDOverview(st, results)
92
- overview.draw()
93
- with tabs[1]:
94
- fullrep = NQDFullReport(st, results)
95
- fullrep.draw()
 
83
  st.experimental_rerun()
84
 
85
  results = run_models(models_to_load, models, st.session_state['comment'])
 
 
 
86
 
87
+ overview = NQDOverview(st, results)
88
+ overview.draw()
 
 
 
 
fullreport.py DELETED
@@ -1,76 +0,0 @@
1
- import streamlit as st
2
- import altair as alt
3
- import pandas as pd
4
- from plots import altair_gauge, pred_bar_chart
5
- import streamlit.components.v1 as components
6
-
7
- md_about_qual = '''
8
- The Quality of Assessment for Learning (QuAL) score measures three
9
- components of high-quality feedback via three subscores:
10
-
11
- 1. A detailed description of the behavior observed (rated 0-3 depending on detail level)
12
- 2. A suggestion for improvement is present (rated no = 0, yes = 1)
13
- 3. Linkage between the behavior and the suggestion is present (rated no = 0, yes = 1)
14
-
15
- The final QuAL score is the sum of these subscores, so it ranges from 0 (lowest quality)
16
- to 5 (highest quality).
17
- '''
18
- class NQDFullReport(object):
19
-
20
- def __init__(self, parent : st, results : dict):
21
- self.p = parent
22
- self.results = results
23
-
24
- def draw(self):
25
- st = self.p
26
- st.header('Understand Your Score')
27
- st.subheader('About the QuAL Score')
28
- # with st.expander('About the QuAL Score', True):
29
- st.markdown(md_about_qual)
30
-
31
- st.subheader('Level of Detail')
32
- c1, c2 = st.columns(2)
33
- with c1:
34
- gauge = altair_gauge(self.results['q1']['label'], 3, 'Level of Detail')
35
- gauge_html = gauge.to_html()
36
- # components.html(gauge_html, height=225, width=334)
37
- st.altair_chart(gauge, use_container_width=True)
38
- with c2:
39
- bar = pred_bar_chart(self.results['q1']['scores'])
40
- st.altair_chart(bar, use_container_width=True)
41
-
42
- st.subheader('Suggestion for Improvement')
43
- c1, c2 = st.columns(2)
44
- with c1:
45
- q2lab = self.results['q2i']['label']
46
- st.markdown('#### Suggestion Given')
47
- if q2lab == 0:
48
- md_str = '# ✅ Yes'
49
- else:
50
- md_str = '# ❌ No'
51
- st.markdown(md_str)
52
- # st.metric('Suggestion Given', (md_str),
53
- # help='Did the evaluator give a suggestion for improvement?')
54
- gauge = altair_gauge(self.results['q2i']['label'], 1, 'Suggestion for Improvement')
55
- # st.altair_chart(gauge, use_container_width=True)
56
- with c2:
57
- bar = pred_bar_chart(self.results['q2i']['scores'], binary_labels={0: 'Yes', 1: 'No'})
58
- st.altair_chart(bar, use_container_width=True)
59
-
60
- st.subheader('Suggestion Linking')
61
- c1, c2 = st.columns(2)
62
- with c1:
63
- q2lab = self.results['q3i']['label']
64
- st.markdown('#### Suggestion Linked')
65
- if q2lab == 0:
66
- md_str = '# ✅ Yes'
67
- else:
68
- md_str = '# ❌ No'
69
- st.markdown(md_str)
70
- # st.metric('Suggestion Given', (md_str),
71
- # help='Did the evaluator give a suggestion for improvement?')
72
- gauge = altair_gauge(self.results['q3i']['label'], 1, 'Suggestion for Improvement')
73
- # st.altair_chart(gauge, use_container_width=True)
74
- with c2:
75
- bar = pred_bar_chart(self.results['q3i']['scores'], binary_labels={0: 'Yes', 1: 'No'})
76
- st.altair_chart(bar, use_container_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
plots.py DELETED
@@ -1,32 +0,0 @@
1
- import altair as alt
2
- import pandas as pd
3
- import matplotlib.cm as cm
4
-
5
- def altair_gauge(score, max_score, title):
6
- source = pd.DataFrame({"category": [1,2], "value": [4,3], "tt": ['hello!',None]})
7
- gauge_theta2 = -1 * 2 * 3.14 * 0.25 + 3.14 * score / float(max_score)
8
- c = alt.layer(
9
- alt.Chart(source).mark_arc(innerRadius=100, theta=0, thetaOffset=(-1*2*3.14*0.25), theta2=(-1*2*3.14*0.25 + 3.14), color='lightgray', tooltip=None),
10
- alt.Chart(source).mark_arc(innerRadius=100, theta=0, thetaOffset=(-1*2*3.14*0.25), theta2=gauge_theta2, tooltip=f'{title}: {int(score)}', color=get_color(score, max_score)),
11
- alt.Chart(source).mark_text(text='%.1d' % score, size=80, font='Calibri', dy=-30)
12
- ).properties(title=title)
13
- return c
14
-
15
- def get_color(score, max_score):
16
- cmap = cm.get_cmap('RdYlGn')
17
- color = cmap(score / float(max_score))
18
- color = f'rgba({int(color[0]*256)}, {int(color[1]*256)}, {int(color[2]*256)}, {int(color[3]*256)})'
19
- return color
20
-
21
- def pred_bar_chart(scores, binary_labels=None):
22
- bar_df = (pd.DataFrame(scores)
23
- .reset_index()
24
- .rename(columns={'index': 'Rating', 0: 'Score'}))
25
- if binary_labels:
26
- bar_df['Rating'].replace(binary_labels, inplace=True)
27
- bar = alt.Chart(bar_df).mark_bar().encode(
28
- x='Rating:O', y='Score',
29
- color=alt.Color('Rating', scale=alt.Scale(scheme='redyellowgreen'), legend=None)
30
- ).properties(height=225, title='Prediction Scores')
31
- bar.to_html()
32
- return bar