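"""Streamlit app for inspecting and comparing retrieval runs on BEIR datasets.

Upload one or two TREC-format run files in the sidebar, choose a dataset and
metric, then browse per-query scores, relevant documents, and top-ranked
documents for each run.
"""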
import streamlit as st
import os
import pathlib
import beir
from beir import util
from beir.datasets.data_loader import GenericDataLoader
import pytrec_eval
import pandas as pd
from collections import defaultdict
import json
import copy

def load_jsonl(f):
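    """Group JSONL records by document id.

    Each line may carry a "question", "text", or "query" field; texts are
    collected per doc_id, and "text" records are also indexed by their sub-id.
    """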
    did2text = defaultdict(list)
    sub_did2text = {}

    for idx, line in enumerate(f):
        inst = json.loads(line)
        if "question" in inst:
            docid = inst["metadata"][0]["passage_id"] if "doc_id" not in inst else inst["doc_id"]
            did2text[docid].append(inst["question"])
        elif "text" in inst:
            docid = inst["doc_id"] if "doc_id" in inst else inst["did"]
            did2text[docid].append(inst["text"])
            sub_did2text[inst["did"]] = inst["text"]
        elif "query" in inst:
            docid = inst["doc_id"] if "doc_id" in inst else inst["did"]
            did2text[docid].append(inst["query"])
        else:
            raise NotImplementedError("Unrecognized JSONL record: expected a 'question', 'text', or 'query' field")
                
    return did2text, sub_did2text



def get_beir(dataset: str):
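    """Download a BEIR dataset if needed and return (corpus, queries, qrels) for the test split."""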
    url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
    out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), "datasets")
    data_path = util.download_and_unzip(url, out_dir)
    return GenericDataLoader(data_folder=data_path).load(split="test")

def load_run(f_run):
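    """Parse a TREC-format run file into a pytrec_eval run dict and a pandas DataFrame."""
    # parse a deep copy so the original upload can still be read by pandas below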
    run = pytrec_eval.parse_run(copy.deepcopy(f_run))
    # convert bytes to strings for keys
    new_run = defaultdict(dict)
    for key, sub_dict in run.items():
        new_run[key.decode("utf-8")] = {k.decode("utf-8"): v for k, v in sub_dict.items()}

    run_pandas = pd.read_csv(f_run, header=None, index_col=None, sep="\t")
    run_pandas.columns = ["qid", "generic", "doc_id", "rank", "score", "model"]
    run_pandas.doc_id = run_pandas.doc_id.astype(str)
    run_pandas.qid = run_pandas.qid.astype(str)
    run_pandas["rank"] = run_pandas["rank"].astype(int)
    run_pandas.score = run_pandas.score.astype(float)
    return new_run, run_pandas


with st.sidebar:
    dataset_name = st.selectbox("Select a dataset in BEIR", ("scifact", "scidocs","trec-covid", "fever", "fiqa", "nfcorpus", "msmarco", "bioasq", "nq", "hotpotqa", "signal1m", "trec-news", "robust04", "arguana", "quora", "climate-fever", "dbpedia-entity", "webis-touche2020", "cqadupstack"))
    metric_name = st.selectbox("Select a metric", ("recall_10", "recall_5"))
    # sliderbar of how many Top N to choose
    top_n = st.slider("Top N", 1, 100, 3)
    st.header("Upload a run file")
    run1_file = st.file_uploader("Choose a file", key="run1")
    st.header("Upload a second run file")
    run2_file = st.file_uploader("Choose a file", key="run2")
    incorrect_only = st.checkbox("Show only incorrect instances", value=False)
    one_better_than_two = st.checkbox("Show only instances where run 1 is better than run 2", value=False)
    two_better_than_one = st.checkbox("Show only instances where run 2 is better than run 1", value=False)

col1, col2 = st.columns([1, 2], gap="medium")
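# col1: per-query instance selector; col2: details for the selected instance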

incorrect = 0             # queries where a run scores 0 on the chosen metric
is_better_run1_count = 0  # queries where run 1 outscores run 2
is_better_run2_count = 0  # queries where run 2 outscores run 1
checkboxes = None         # (label, is_checked) pairs for the instance selector
with col1:
    st.title("Instances")
    if run1_file is not None:
        print("Running....")
        corpus, queries, qrels = get_beir(dataset_name)
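        # compute every supported measure for each query; the sidebar metric picks one later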
        evaluator = pytrec_eval.RelevanceEvaluator(
                qrels, pytrec_eval.supported_measures)
        
        if run1_file is not None:
            run1, run1_pandas = load_run(run1_file)
            results1 = evaluator.evaluate(run1)  # {qid: {measure: value}}
        if run2_file is not None:
            run2, run2_pandas = load_run(run2_file)
            results2 = evaluator.evaluate(run2)
            
        name_of_columns = ["Overview"] + sorted([str(item) for item in set(run1_pandas.qid.tolist())])
        checkboxes = [("Overview", st.checkbox("Overview", key="0overview"))]
        st.divider()
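        # one checkbox per query id, filtered according to the sidebar options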
        for idx, item in enumerate(name_of_columns):
            is_overview = item == "Overview"
            if is_overview:
                continue
            is_incorrect = False
            is_better_run1 = False
            is_better_run2 = False

            run1_score = results1[str(item)][metric_name] if not is_overview else 1
            if run2_file is not None:
                run2_score = results2[str(item)][metric_name] if not is_overview else 1

                if not is_overview and (run1_score == 0 or run2_score == 0):
                    incorrect += 1
                    is_incorrect = True

                if not is_overview and run1_score > run2_score:
                    is_better_run1_count += 1
                    is_better_run1 = True
                elif not is_overview and run2_score > run1_score:
                    is_better_run2_count += 1
                    is_better_run2 = True

                if not incorrect_only or is_incorrect:
                    if not one_better_than_two or is_better_run1:
                        if not two_better_than_one or is_better_run2:
                            check = st.checkbox(str(item), key=f"{idx}check")  
                            st.divider()
                            checkboxes.append((item, check))
            else:
                if not is_overview and run1_score == 0:
                    incorrect += 1
                    is_incorrect = True

                if not incorrect_only or is_incorrect:
                    check = st.checkbox(str(item), key=f"{idx}check")  
                    st.divider()
                    checkboxes.append((item, check))


with col2:
    if checkboxes is not None:
        st.title(f"Information ({len(checkboxes) - 1 if checkboxes else 0}/{len(name_of_columns) - 1})")
    else:
        st.title("Information")
    ### Only one run file
    if run1_file is not None and run2_file is None:
        for check_idx, (inst_num, checkbox) in enumerate(checkboxes):
            if checkbox:
                if inst_num == "Overview":
                    st.header("Overview")
                    st.markdown("TODO: Add overview")
                else:
                    st.header(f"Instance Number: {inst_num}")

                    st.subheader("Query")
                    query_text = queries[str(inst_num)]
                    st.markdown(query_text)
                    st.divider()

                    ## Documents
                    # relevant
                    relevant_docs = list(qrels[str(inst_num)].keys())
                    doc_texts = [(doc_id, corpus[doc_id]["title"], corpus[doc_id]["text"]) for doc_id in relevant_docs]
                    st.subheader("Relevant Documents")
                    for (docid, title, text) in doc_texts:
                        st.text_area(f"{docid}: {title}", text)

                    # ranks at which run 1 retrieved the relevant documents
                    pred_doc = run1_pandas[run1_pandas.doc_id.isin(relevant_docs)]
                    rank_pred = pred_doc[pred_doc.qid == str(inst_num)]["rank"].tolist()
                    st.subheader("Rank of Relevant Documents")
                    st.markdown(f"Rank: {rank_pred}")

                    st.divider()

                    if st.checkbox('Show top ranked documents'):
                        st.subheader("Top N Ranked Documents")
                        run1_top_n = run1_pandas[run1_pandas.qid == str(inst_num)][:top_n]
                        run1_top_n_docs = [corpus[str(doc_id)] for doc_id in run1_top_n.doc_id.tolist()]
                        for d_idx, doc in enumerate(run1_top_n_docs):
                            st.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: {doc['title']}", doc["text"])
                        st.divider()


                    st.subheader("Score")
                    st.markdown(f"{results1[str(inst_num)][metric_name]}")
                break  # only display the first selected instance

    ## Both run files available
    elif run1_file is not None and run2_file is not None:
        for check_idx, (inst_num, checkbox) in enumerate(checkboxes):
            if checkbox:
                if inst_num == "Overview":
                    st.header("Overview")
                    st.markdown("TODO: Add overview")
                else:
                    st.header(f"Instance Number: {inst_num}")

                    st.subheader("Query")
                    query_text = queries[str(inst_num)]
                    st.markdown(query_text)
                    st.divider()

                    ## Documents
                    # relevant
                    relevant_docs = list(qrels[str(inst_num)].keys())
                    doc_texts = [(doc_id, corpus[doc_id]["title"], corpus[doc_id]["text"]) for doc_id in relevant_docs]
                    st.subheader("Relevant Documents")
                    for (docid, title, text) in doc_texts:
                        st.text_area(f"{docid}: {title}", text)

                    # ranks at which each run retrieved the relevant documents
                    pred_doc1 = run1_pandas[run1_pandas.doc_id.isin(relevant_docs)]
                    rank_pred1 = pred_doc1[pred_doc1.qid == str(inst_num)]["rank"].tolist()
                    pred_doc2 = run2_pandas[run2_pandas.doc_id.isin(relevant_docs)]
                    rank_pred2 = pred_doc2[pred_doc2.qid == str(inst_num)]["rank"].tolist()
                    st.subheader("Rank of Relevant Documents")
                    st.markdown(f"Run 1 Rank: {rank_pred1}")
                    st.markdown(f"Run 2 Rank: {rank_pred2}")


                    st.divider()

                    if st.checkbox('Show top ranked documents for Run 1'):
                        st.subheader("Top N Ranked Documents")
                        run1_top_n = run1_pandas[run1_pandas.qid == str(inst_num)][:top_n]
                        run1_top_n_docs = [corpus[str(doc_id)] for doc_id in run1_top_n.doc_id.tolist()]
                        for d_idx, doc in enumerate(run1_top_n_docs):
                            st.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: {doc['title']}", doc["text"])
                        
                    if st.checkbox('Show top ranked documents for Run 2'):
                        st.subheader("Top N Ranked Documents")
                        run2_top_n = run2_pandas[run2_pandas.qid == str(inst_num)][:top_n]
                        run2_top_n_docs = [corpus[str(doc_id)] for doc_id in run2_top_n.doc_id.tolist()]
                        for d_idx, doc in enumerate(run2_top_n_docs):
                            st.text_area(f"{run2_top_n['doc_id'].iloc[d_idx]}: {doc['title']}", doc["text"])

                    st.divider()


                    st.subheader("Scores")
                    st.markdown(f"Run 1: {results1[str(inst_num)][metric_name]}")
                    st.markdown(f"Run 2: {results2[str(inst_num)][metric_name]}")

                break  # only display the first selected instance