Spaces: Runtime error

raphaelsty committed on
Commit 58bcf08 • 1 Parent(s): 296aec7

app

Browse files
- .gitattributes +1 -0
- README.md +3 -3
- app.py +285 -0
- explain.png +0 -0
- games.json +0 -0
- games_summary.pkl +3 -0
- games_title.pkl +3 -0
- requirements.txt +1 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,11 +1,11 @@
 ---
-title:
-emoji:
+title: End-to-end Neural Search
+emoji: 👾
 colorFrom: gray
 colorTo: purple
 sdk: streamlit
 app_file: app.py
-pinned:
+pinned: true
 ---

 # Configuration
app.py ADDED
@@ -0,0 +1,285 @@
from __future__ import annotations

import json

import streamlit as st
from annotated_text import annotated_text
from cherche import compose, qa, rank, retrieve, summary
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import pipeline


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def loading_pipelines():
    """Create three pipelines dedicated to neural search. The first one is dedicated to game
    retrieval. The second is dedicated to the question answering task. The third is dedicated to
    the summarization task. Save pipelines as pickle file.

    >>> search = (
    ...     tfidf(on = "game") + ranker(on = "game") | tfidf(on = ["game", "summary"]) +
    ...     ranker(on = ["game", "summary"]) + documents
    ... )

    """
    # Load documents
    with open("games.json", "r") as documents_file:
        documents = json.load(documents_file)

    # A first retriever dedicated to title
    retriever_title = retrieve.TfIdf(
        key="id",
        on=["game"],
        documents=documents,
        tfidf=TfidfVectorizer(
            lowercase=True,
            min_df=1,
            max_df=0.9,
            ngram_range=(3, 7),
            analyzer="char",
        ),
        k=30,
    )

    # A second retriever dedicated to title and also summary of games.
    retriever_title_summary = retrieve.TfIdf(
        key="id",
        on=["game", "summary"],
        documents=documents,
        tfidf=TfidfVectorizer(
            lowercase=True,
            min_df=1,
            max_df=0.9,
            ngram_range=(3, 7),
            analyzer="char",
        ),
        k=30,
    )

    # Load our encoder to re-rank retrievers documents.
    encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode

    # A ranker dedicated to title
    ranker_title = rank.Encoder(
        key="id",
        on=["game"],
        encoder=encoder,
        k=5,
        path="games_title.pkl",
    )

    # A ranker dedicated to title and summary
    ranker_title_summary = rank.Encoder(
        key="id",
        on=["game", "summary"],
        encoder=encoder,
        k=5,
        path="games_summary.pkl",
    )

    # Pipeline creation
    search = (
        (retriever_title + ranker_title) | (retriever_title_summary + ranker_title_summary)
    ) + documents

    # Index
    search.add(documents)
    return search


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def write_search(query):
    return search(query)[:5]


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def loading_summarization_pipeline():
    summarizer = summary.Summary(
        model=pipeline(
            "summarization",
            model="sshleifer/distilbart-cnn-12-6",
            tokenizer="sshleifer/distilbart-cnn-12-6",
            framework="pt",
        ),
        on=["game", "summary"],
        max_length=50,
    )

    search_summarize = search + summarizer
    return search_summarize


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def write_search_summarize(query_summarize):
    return search_summarize(query_summarize)


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def loading_qa_pipeline():
    question_answering = qa.QA(
        model=pipeline(
            "question-answering",
            model="deepset/roberta-base-squad2",
            tokenizer="deepset/roberta-base-squad2",
        ),
        k=3,
        on="summary",
    )
    search_qa = search + question_answering
    return search_qa


@st.cache(hash_funcs={compose.Pipeline: lambda _: None}, allow_output_mutation=True)
def write_search_qa(query_qa):
    return search_qa(query_qa)


if __name__ == "__main__":

    st.markdown("# 🕹 Cherche")

    st.markdown(
        "[Cherche](https://github.com/raphaelsty/cherche) (search in French) allows you to create a \
        neural search pipeline using retrievers and pre-trained language models as rankers. Cherche's main strength is its ability to build diverse and end-to-end pipelines."
    )

    st.image("explain.png")

    st.markdown(
        "Here is a demo of neural search for video games using a sample of reviews made by [Metacritic](https://www.metacritic.com). \
        Starting the app may take a while if the models are not stored in cache."
    )

    # Will be slow the first time, you will need to compute embeddings.
    search = loading_pipelines()

    st.markdown("## 👾 Neural search")

    st.markdown(
        '```search = (tfidf(on = "title") + ranker(on = "title") | tfidf(on = ["title", "summary"]) + ranker(on = ["game", "summary"]) + documents)```'
    )

    query = st.text_input(
        "games",
        value="super smash bros",
        max_chars=None,
        key=None,
        type="default",
        help=None,
        autocomplete=None,
        on_change=None,
        args=None,
        kwargs=None,
    )

    if query:

        for document in write_search(query):
            if document["rate"] < 10:
                document["rate"] *= 10

            st.markdown(f"### {document['game']}")
            st.markdown(f"Metacritic Rating: {document['rate']}")

            col_1, col_2 = st.columns([1, 5])
            with col_1:
                st.image(document["image"], width=100)
            with col_2:
                st.write(f"{document['summary'][:430]}...")

    st.markdown("## 🎲 Summarization")

    st.markdown(
        '```search = (tfidf(on = "title") + ranker(on = "title") | tfidf(on = ["title", "summary"]) + ranker(on = ["game", "summary"]) + documents + summarization(on = "summary"))```'
    )

    st.markdown(
        "Let's create a summary, but it may take a few seconds. Summarization models are not that fast using CPU. Also it may take time to load the summarization model if it's not in cache yet."
    )

    query_summarize = st.text_input(
        "summarization",
        value="super smash bros",
        max_chars=None,
        key=None,
        type="default",
        help=None,
        autocomplete=None,
        on_change=None,
        args=None,
        kwargs=None,
    )

    if query_summarize:
        search_summarize = loading_summarization_pipeline()
        st.write(f"**{write_search_summarize(query_summarize)}**")

    st.markdown("## 🎮 Question answering")

    st.markdown(
        '```search = (tfidf(on = "title") + ranker(on = "title") | tfidf(on = ["title", "summary"]) + ranker(on = ["game", "summary"]) + documents + question_answering(on = "summary"))```'
    )

    st.markdown(
        "It may take a few seconds. Question answering models are not that fast using CPU. Also it may take time to load the question answering model if it's not in cache yet."
    )

    query_qa = st.text_input(
        "question",
        value="What is the purpose of playing Super Smash Bros?",
        max_chars=None,
        key=None,
        type="default",
        help=None,
        autocomplete=None,
        on_change=None,
        args=None,
        kwargs=None,
    )

    if query_qa:

        search_qa = loading_qa_pipeline()
        for document_qa in write_search_qa(query_qa):

            st.markdown(f"### {document_qa['game']}")
            st.markdown(f"Metacritic Rating: {document_qa['rate']}")

            col_1, col_2 = st.columns([1, 5])
            with col_1:
                st.image(document_qa["image"], width=100)
            with col_2:

                annotations = document_qa["summary"].split(document_qa["answer"])

                if document_qa["start"] == 0:
                    annotated_text(
                        (
                            document_qa["answer"],
                            f"answer {round(document_qa['qa_score'], 2)}",
                            "#8ef",
                        ),
                        " ",
                        " ".join(annotations[1:]),
                    )

                elif document_qa["end"] == len(document_qa["summary"]):
                    annotated_text(
                        " ".join(annotations[:-1]),
                        (
                            document_qa["answer"],
                            f"answer {round(document_qa['qa_score'], 2)}",
                            "#8ef",
                        ),
                    )

                else:
                    annotated_text(
                        annotations[0],
                        (
                            document_qa["answer"],
                            f"answer {round(document_qa['qa_score'], 2)}",
                            "#8ef",
                        ),
                        annotations[1],
                    )
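The core of app.py is the composed pipeline: character n-gram TF-IDF retrievers (title only, and title plus summary), each re-ranked by a SentenceTransformer encoder, merged with the union operator `|` and mapped back to the documents. Below is a minimal sketch of querying such a pipeline outside Streamlit; the single-branch composition and the example query are illustrative assumptions, and only the cherche calls mirror app.py.

```python
# Sketch only (not part of this commit): query a cherche pipeline like the one
# built in app.py, without the Streamlit layer. The single-branch composition
# and the example query are illustrative assumptions.
import json

from cherche import rank, retrieve
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import TfidfVectorizer

with open("games.json", "r") as documents_file:  # same document file as the Space
    documents = json.load(documents_file)

# Character n-gram TF-IDF retriever over title and summary, as in app.py.
retriever = retrieve.TfIdf(
    key="id",
    on=["game", "summary"],
    documents=documents,
    tfidf=TfidfVectorizer(lowercase=True, ngram_range=(3, 7), analyzer="char"),
    k=30,
)

# Re-rank the retrieved candidates with the same sentence encoder.
ranker = rank.Encoder(
    key="id",
    on=["game", "summary"],
    encoder=SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode,
    k=5,
    path="games_summary.pkl",
)

# Retriever + ranker, then map the returned ids back to full documents.
search = (retriever + ranker) + documents
search.add(documents)

print(search("super smash bros")[:5])
```

The union used in app.py simply combines two such branches: `(retriever_title + ranker_title) | (retriever_title_summary + ranker_title_summary)`.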
explain.png
ADDED
games.json
ADDED
The diff for this file is too large to render. See raw diff.
games_summary.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c7087c97a430756b9d35fa50418803669703e0f040445f5fdf5a87df520ef5e
+size 22703994
games_title.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3be1863037c608280a4abddc3cce82926783bccba08cd215ad9a2e775ef2e32f
+size 22533764
requirements.txt ADDED
@@ -0,0 +1 @@
+cherche == 0.0.1