add new titles
- .gitignore +2 -0
- app.py +16 -10
.gitignore (ADDED)
@@ -0,0 +1,2 @@
+venv/
+.idea/
app.py (CHANGED)
@@ -4,7 +4,7 @@


import streamlit as st
-
+from streamlit.components.v1 import html
from n4a_analytics_lib.analytics import (GlobalStatistics, IaaStatistics)


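The hunk above only adds the `streamlit.components.v1` import; `html` itself is not called in any of the hunks below, so it is presumably meant for rendering raw HTML such as the legend. For context, a minimal sketch of what that helper does (the `legend_html` fragment is invented, not taken from app.py): it renders an HTML string inside an isolated iframe, an alternative to the `st.markdown(..., unsafe_allow_html=True)` calls used further down.

```python
import streamlit as st
from streamlit.components.v1 import html

# Hypothetical fragment, just to illustrate the imported helper:
# html() renders markup in its own iframe with an explicit height,
# whereas st.markdown(..., unsafe_allow_html=True) injects it into the page flow.
legend_html = """
<div id="legend">
  <h3>IAA Metrics Legend</h3>
  <p>kappa &ge; 0.81: almost perfect agreement</p>
</div>
"""
html(legend_html, height=200, scrolling=True)
```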
@@ -105,18 +105,19 @@ if option == "Inter-Annotator Agreement results":
#tab1, tab2, tab3, tab4, tab5 = st.tabs(
# ["📈 IAA metrics", "📐 IAA Metrics Legend", "✔️ Agree annotations", "❌ Disagree annotations",
# "🏷️ Global Labels Statistics"])
-st.
-
+st.markdown("## 📈 IAA metrics")
+col1_kappa, col2_kappa = st.columns(2)
+col1_kappa.subheader("Fleiss Kappa (global score for group):")


-
-
+col1_kappa.markdown(interpret_kappa(round(fleiss_kappa_function(matrix), 2)), unsafe_allow_html=True)
+col1_kappa.subheader("Cohen Kappa Annotators Matrix (score between annotators):")
# tab1.dataframe(df)
data = []
for coder_1, coder_2 in pairs:
    cohen_function = cohen_kappa_function(project_analyzed.labels_per_coder[coder_1], project_analyzed.labels_per_coder[coder_2])
    data.append(((coder_1, coder_2), cohen_function))
-
+    col1_kappa.markdown(f"* {coder_1} <> {coder_2} : {interpret_kappa(cohen_function)}", unsafe_allow_html=True)
    # print(f"* {coder_1} <> {coder_2} : {cohen_function}")

intermediary = defaultdict(Counter)
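`fleiss_kappa_function`, `cohen_kappa_function`, `interpret_kappa`, `matrix`, `pairs`, and `project_analyzed` all come from earlier in app.py or from `n4a_analytics_lib` and are not part of this diff. As a rough sketch of the pattern the new `col1_kappa` lines rely on, assuming scikit-learn's `cohen_kappa_score` underneath and the usual Landis & Koch interpretation bands (an assumption for illustration, not the project's actual implementation):

```python
from itertools import combinations
from sklearn.metrics import cohen_kappa_score

def interpret_kappa(score: float) -> str:
    # Hypothetical helper: colour-code the kappa value with the usual
    # Landis & Koch bands; the real one lives in n4a_analytics_lib and
    # apparently returns HTML, since the app passes unsafe_allow_html=True.
    bands = [(0.0, "poor", "#e74c3c"), (0.20, "slight", "#e67e22"),
             (0.40, "fair", "#f1c40f"), (0.60, "moderate", "#2ecc71"),
             (0.80, "substantial", "#27ae60"), (1.01, "almost perfect", "#16a085")]
    for upper, label, color in bands:
        if score < upper:
            return f'<span style="color:{color};">{round(score, 2)} ({label})</span>'
    return str(round(score, 2))

# labels_per_coder maps each annotator to one label per annotation unit,
# aligned by position; invented toy data.
labels_per_coder = {
    "coder_a": ["PER", "LOC", "ORG", "PER"],
    "coder_b": ["PER", "LOC", "PER", "PER"],
    "coder_c": ["PER", "ORG", "ORG", "PER"],
}
pairs = list(combinations(labels_per_coder, 2))

data = []
for coder_1, coder_2 in pairs:
    kappa = cohen_kappa_score(labels_per_coder[coder_1], labels_per_coder[coder_2])
    data.append(((coder_1, coder_2), kappa))
    print(f"* {coder_1} <> {coder_2} : {interpret_kappa(kappa)}")
```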
@@ -138,7 +139,10 @@ if option == "Inter-Annotator Agreement results":
sn.heatmap(df_cm, cmap=colors, annot=True, mask=mask, annot_kws={"size": 7}, vmin=0, vmax=1, ax=ax) # font size
# plt.show()
st.pyplot(ax.figure)
-
+col2_kappa.markdown("""
+<div>
+<div id="legend" style="right: 70em;">
+<h3>📐 IAA Metrics Legend</h3>
<table>
<thead>
<tr>
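`df_cm`, `colors`, and `mask` are built a few lines above this hunk and are not shown here. For orientation, one common way to assemble the annotator-by-annotator kappa matrix and mask its redundant upper triangle before the `sn.heatmap` call (a sketch under those assumptions, with made-up scores):

```python
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt

# data holds ((coder_1, coder_2), kappa) tuples, as in the pairwise loop above.
data = [(("coder_a", "coder_b"), 0.71), (("coder_a", "coder_c"), 0.58), (("coder_b", "coder_c"), 0.64)]
coders = sorted({coder for pair, _ in data for coder in pair})

df_cm = pd.DataFrame(np.nan, index=coders, columns=coders)
for (c1, c2), kappa in data:
    # fill both triangles, then hide one of them with the mask below
    df_cm.loc[c1, c2] = kappa
    df_cm.loc[c2, c1] = kappa

mask = np.triu(np.ones_like(df_cm, dtype=bool))  # hide upper triangle incl. diagonal
fig, ax = plt.subplots()
sn.heatmap(df_cm, cmap="Blues", annot=True, mask=mask, annot_kws={"size": 7}, vmin=0, vmax=1, ax=ax)
```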
@@ -193,10 +197,11 @@ if option == "Inter-Annotator Agreement results":
agreement </td>
</tr>
</tbody>
-</table>"""
+</table></div></div>"""

, unsafe_allow_html = True)

+
## commune
@st.cache
def convert_df(df_ex):
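The legend is a single triple-quoted HTML string passed to `col2_kappa.markdown`: the opening call and the wrapping div elements were added in the previous hunk, and this hunk closes the string with `</table></div></div>` so that the `unsafe_allow_html = True` argument applies to the whole block. Collapsed into one place, the pattern looks roughly like this (table content shortened and partly invented; the real rows are untouched by this diff):

```python
import streamlit as st

col1_kappa, col2_kappa = st.columns(2)

# unsafe_allow_html is needed because st.markdown escapes raw HTML by default.
col2_kappa.markdown("""
<div>
<div id="legend" style="right: 70em;">
<h3>IAA Metrics Legend</h3>
<table>
<thead><tr><th>Kappa</th><th>Interpretation</th></tr></thead>
<tbody><tr><td>0.81 - 1.00</td><td>almost perfect agreement</td></tr></tbody>
</table></div></div>""",
    unsafe_allow_html=True,
)
```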
@@ -217,7 +222,7 @@ if option == "Inter-Annotator Agreement results":

csv_agree = convert_df(df_agree)

-st.subheader("
+st.subheader("✔️ Agree annotations")
st.markdown(f"{total_unanime} / {len(df)} annotations ({round((total_unanime / len(df)) * 100, 2)} %)")
st.download_button(
    "Press to Download CSV",
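`convert_df` is the small `@st.cache`-decorated helper declared a few lines above, and `df_agree` / `total_unanime` are computed elsewhere in app.py. The new subheader simply titles the standard Streamlit CSV-download pattern, roughly (a sketch with an invented DataFrame, file name, and MIME type):

```python
import pandas as pd
import streamlit as st

@st.cache  # legacy decorator used by this app (st.cache_data in newer Streamlit)
def convert_df(df_ex):
    # Encode once; reruns reuse the cached bytes.
    return df_ex.to_csv(index=False).encode("utf-8")

# Invented toy frame standing in for df_agree.
df_agree = pd.DataFrame({"mention": ["Paris"], "coder_a": ["LOC"], "coder_b": ["LOC"]})
csv_agree = convert_df(df_agree)

st.subheader("✔️ Agree annotations")
st.download_button(
    "Press to Download CSV",
    csv_agree,
    file_name="agree_annotations.csv",  # file name invented for the example
    mime="text/csv",
)
```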
@@ -238,7 +243,7 @@ if option == "Inter-Annotator Agreement results":
df_disagree = df[df[columns_to_compare].apply(lambda row: check_all_not_equal(row), axis=1)]
total_desaccord = len(df_disagree)
csv_disagree = convert_df(df_disagree)
-st.subheader("
+st.subheader("❌ Disagree annotations")
st.markdown(
    f"{total_desaccord} / {len(df)} annotations ({round((total_desaccord / len(df)) * 100, 2)} %)")
st.download_button(
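`check_all_not_equal` and `columns_to_compare` are defined earlier in app.py; the row-wise `apply(..., axis=1)` keeps only the annotations where the coders did not all pick the same label. A plausible shape for that helper, shown only to make the filter readable (an assumption, not the project's code):

```python
import pandas as pd

def check_all_not_equal(row: pd.Series) -> bool:
    # True when the annotators did NOT all choose the same label,
    # i.e. the row contains at least two distinct values.
    return row.nunique(dropna=False) > 1

columns_to_compare = ["coder_a", "coder_b", "coder_c"]
df = pd.DataFrame(
    {"coder_a": ["PER", "LOC"], "coder_b": ["PER", "ORG"], "coder_c": ["PER", "LOC"]}
)
df_disagree = df[df[columns_to_compare].apply(check_all_not_equal, axis=1)]
total_desaccord = len(df_disagree)  # here: 1 (only the second row is disputed)
```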
@@ -299,6 +304,7 @@ if option == "Inter-Annotator Agreement results":
    return fig

f = plot_pies(to_pie)
+st.subheader("🏷️ Global Labels Statistics")
st.pyplot(f.figure)

# global project results view
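`plot_pies` and `to_pie` are defined just above this hunk (the `return fig` context line is the end of that function); the change only inserts the missing 🏷️ subheader before the existing `st.pyplot` call. For a rough idea of what such a per-annotator label-distribution helper can look like (a sketch with invented counts, not the app's implementation):

```python
import matplotlib.pyplot as plt
import streamlit as st

def plot_pies(to_pie: dict) -> plt.Figure:
    # One pie per annotator, laid out side by side;
    # to_pie maps annotator -> {label: count}.
    fig, axes = plt.subplots(1, len(to_pie), figsize=(4 * len(to_pie), 4))
    if len(to_pie) == 1:
        axes = [axes]
    for ax, (coder, counts) in zip(axes, to_pie.items()):
        ax.pie(list(counts.values()), labels=list(counts.keys()), autopct="%1.1f%%")
        ax.set_title(coder)
    return fig

to_pie = {"coder_a": {"PER": 10, "LOC": 5}, "coder_b": {"PER": 8, "LOC": 7}}
fig = plot_pies(to_pie)
st.subheader("🏷️ Global Labels Statistics")
st.pyplot(fig)
```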
|