import json

import streamlit as st
from annotated_text import annotated_text
from cherche import compose, qa, rank, retrieve, summary
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import pipeline


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def loading_pipelines():
    """Create three pipelines dedicated to neural research. The first one is dedicated to game
    retrieval. The second is dedicated to the question answering task. The third is dedicated to
    the summarization task. Save pipelines as pickle file.

    >>> search = (
    ...    tfidf(on = "game") + ranker(on = "game") | tfidf(on = ["game", "summary"]) +
    ...    ranker(on = ["game", "summary"]) + documents
    ... )

    """
    # Load documents
    with open("games.json", "r") as documents_file:
        documents = json.load(documents_file)

    # A first retriever dedicated to game titles.
    retriever_title = retrieve.TfIdf(
        key="id",
        on=["game"],
        documents=documents,
        tfidf=TfidfVectorizer(
            lowercase=True,
            min_df=1,
            max_df=0.9,
            ngram_range=(3, 7),
            analyzer="char",
        ),
        k=30,
    )
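
    # Note on the vectorizer: character n-grams (3 to 7 characters) make the
    # TF-IDF retriever tolerant to typos. As a sketch of the intuition, a query
    # such as "smash brs" still shares most of its 3-grams ("sma", "mas",
    # "ash", ...) with "smash bros", so the right titles are retrieved anyway.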

    # A second retriever dedicated to both game titles and summaries.
    retriever_title_summary = retrieve.TfIdf(
        key="id",
        on=["game", "summary"],
        documents=documents,
        tfidf=TfidfVectorizer(
            lowercase=True,
            min_df=1,
            max_df=0.9,
            ngram_range=(3, 7),
            analyzer="char",
        ),
        k=30,
    )

    # Load the encoder used to re-rank the documents returned by the retrievers.
    encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode

    # A ranker dedicated to game titles.
    ranker_title = rank.Encoder(
        key="id",
        on=["game"],
        encoder=encoder,
        k=5,
        path="games_title.pkl",
    )

    # A ranker dedicated to game titles and summaries.
    ranker_title_summary = rank.Encoder(
        key="id",
        on=["game", "summary"],
        encoder=encoder,
        k=5,
        path="games_summary.pkl",
    )

    # Pipeline creation
    search = (
        (retriever_title + ranker_title) | (retriever_title_summary + ranker_title_summary)
    ) + documents

    # Index the documents: the rankers pre-compute and store their embeddings here.
    search.add(documents)
    return search
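
# A minimal sketch of what the search pipeline returns. Field names are taken
# from the documents used below; the values are illustrative, not real output:
#
#   search("zelda")
#   # [{"id": 42, "game": "The Legend of Zelda ...", "summary": "...",
#   #   "rate": 97, "image": "...", "similarity": 0.87}, ...]
#
# The `|` operator acts as a union: documents found by the title branch come
# first, followed by those only found by the title + summary branch. The
# trailing `+ documents` maps the retrieved ids back to the full documents.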


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def write_search(query):
    """Run the module-level ``search`` pipeline and keep the top 5 documents."""
    return search(query)[:5]


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def loading_summarization_pipeline():
    """Append a summarizer to the module-level ``search`` pipeline."""
    summarizer = summary.Summary(
        model=pipeline(
            "summarization",
            model="sshleifer/distilbart-cnn-12-6",
            tokenizer="sshleifer/distilbart-cnn-12-6",
            framework="pt",
        ),
        on=["game", "summary"],
        max_length=50,
    )

    search_summarize = search + summarizer
    return search_summarize
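
# A sketch of the expected behaviour: the summarizer condenses the titles and
# summaries of the retrieved documents into a single string (illustrative
# output, not real model output):
#
#   search_summarize("zelda")
#   # "Link sets out across the kingdom of Hyrule ..."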


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def write_search_summarize(query_summarize):
    """Run the module-level ``search_summarize`` pipeline."""
    return search_summarize(query_summarize)


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def loading_qa_pipeline():
    """Append a question answering model to the module-level ``search`` pipeline."""
    question_answering = qa.QA(
        model=pipeline(
            "question-answering",
            model="deepset/roberta-base-squad2",
            tokenizer="deepset/roberta-base-squad2",
        ),
        k=3,
        on="summary",
    )
    search_qa = search + question_answering
    return search_qa
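
# A sketch of the expected output: each retrieved document is enriched with the
# fields used below, i.e. "answer", "start", "end" and "qa_score" (the values
# here are illustrative):
#
#   search_qa("What is the purpose of playing Super Smash Bros?")
#   # [{"game": "Super Smash Bros ...", "answer": "knock opponents off the
#   #   stage", "start": 112, "end": 143, "qa_score": 0.73, ...}, ...]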


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def write_search_qa(query_qa):
    """Run the module-level ``search_qa`` pipeline."""
    return search_qa(query_qa)


if __name__ == "__main__":

    st.markdown("# 🕹 Cherche")

    st.markdown(
        "[Cherche](https://github.com/raphaelsty/cherche) (search in French) allows you to create a \
        neural search pipeline using retrievers and pre-trained language models as rankers. Cherche's main strength is its ability to build diverse and end-to-end pipelines."
    )

    st.image("explain.png")

    st.markdown(
        "Here is a demo of neural search for video games using a sample of reviews from \
        [Metacritic](https://www.metacritic.com). Starting the app may take a while if the \
        models are not cached yet."
    )

    # Slow on the first run: the rankers need to compute the document embeddings.
    search = loading_pipelines()

    st.markdown("## 👾 Neural search")

    st.markdown(
        '```search = ((tfidf(on="game") + ranker(on="game")) | (tfidf(on=["game", "summary"]) + ranker(on=["game", "summary"]))) + documents```'
    )

    query = st.text_input("games", value="super smash bros")

    if query:

        for document in write_search(query):
            if document["rate"] < 10:
                document["rate"] *= 10

            st.markdown(f"### {document['game']}")
            st.markdown(f"Metacritic Rating: {document['rate']}")

            col_1, col_2 = st.columns([1, 5])
            with col_1:
                st.image(document["image"], width=100)
            with col_2:
                st.write(f"{document['summary'][:430]}...")

    st.markdown("## 🎲 Summarization")

    st.markdown(
        '```search = ((tfidf(on="game") + ranker(on="game")) | (tfidf(on=["game", "summary"]) + ranker(on=["game", "summary"]))) + documents + summarization(on=["game", "summary"])```'
    )

    st.markdown(
        "Let's create a summary. It may take a few seconds: summarization models are not that \
        fast on CPU, and loading the summarization model also takes a while if it is not cached yet."
    )

    query_summarize = st.text_input("summarization", value="super smash bros")

    if query_summarize:
        search_summarize = loading_summarization_pipeline()
        st.write(f"**{write_search_summarize(query_summarize)}**")

    st.markdown("## 🎮 Question answering")

    st.markdown(
        '```search = ((tfidf(on="game") + ranker(on="game")) | (tfidf(on=["game", "summary"]) + ranker(on=["game", "summary"]))) + documents + question_answering(on="summary")```'
    )

    st.markdown(
        "It may take a few seconds: question answering models are not that fast on CPU, and \
        loading the question answering model also takes a while if it is not cached yet."
    )

    query_qa = st.text_input("question", value="What is the purpose of playing Super Smash Bros?")

    if query_qa:

        search_qa = loading_qa_pipeline()
        for document_qa in write_search_qa(query_qa):

            st.markdown(f"### {document_qa['game']}")
            st.markdown(f"Metacritic Rating: {document_qa['rate']}")

            col_1, col_2 = st.columns([1, 5])
            with col_1:
                st.image(document_qa["image"], width=100)
            with col_2:

                # Split the summary around the extracted answer to highlight it in place.
                annotations = document_qa["summary"].split(document_qa["answer"])

                if document_qa["start"] == 0:
                    annotated_text(
                        (
                            document_qa["answer"],
                            f"answer {round(document_qa['qa_score'], 2)}",
                            "#8ef",
                        ),
                        " ",
                        " ".join(annotations[1:]),
                    )

                elif document_qa["end"] == len(document_qa["summary"]):
                    annotated_text(
                        " ".join(annotations[:-1]),
                        (
                            document_qa["answer"],
                            f"answer {round(document_qa['qa_score'], 2)}",
                            "#8ef",
                        ),
                    )

                # The answer sits somewhere in the middle of the summary.
                else:
                    annotated_text(
                        annotations[0],
                        (
                            document_qa["answer"],
                            f"answer {round(document_qa['qa_score'], 2)}",
                            "#8ef",
                        ),
                        annotations[1],
                    )