import re
import json
import base64
import pandas as pd
import gradio as gr
import pyterrier as pt
pt.init()
import pyt_splade
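# Shared SPLADE model: factory.query() rewrites queries (Q -> Q) and factory.indexing()
# rewrites documents (D -> D) into dicts of weighted terms.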
factory = pyt_splade.SpladeFactory()
pipe_queries = factory.query()
pipe_docs = factory.indexing()
COLAB_NAME = 'pyterrier_splade.ipynb'
COLAB_INSTALL = '''
!pip install -q git+https://github.com/naver/splade
!pip install -q git+https://github.com/cmacdonald/pyt_splade@misc
'''.strip()
def df2code(df):
    rows = []
    for row in df.itertuples(index=False):
        rows.append(f' {dict(row._asdict())},')
    rows = '\n'.join(rows)
    return f'''pd.DataFrame([
{rows}
])'''
def code2colab(code):
    enc_code = base64.b64encode((COLAB_INSTALL + '\n\n' + code.strip()).encode()).decode()
    url = f'https://colaburl.macavaney.us/?py64={enc_code}&name={COLAB_NAME}'
    return f'<div style="text-align: center; margin-bottom: -16px;"><a href="{url}" rel="nofollow" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" style="margin: 0; display: inline-block;" /></a></div>'
def code2md(code):
    return f'''
{code2colab(code)}
```python
{code.strip()}
```
'''
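# Render each query/document as <kbd> token chips shaded by SPLADE weight; for queries,
# the weights are parsed back out of the 'combine:0=weight(term)' syntax produced by the
# query pipeline.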
def generate_vis(df, mode='Document'):
    if len(df) == 0:
        return ''
    result = []
    if mode == 'Document':
        max_score = max(max(t.values()) for t in df['toks'])
    for row in df.itertuples(index=False):
        if mode == 'Query':
            tok_scores = {m.group(2): float(m.group(1)) for m in re.finditer(r'combine:0=([0-9.]+)\(([^)]+)\)', row.query)}
            max_score = max(tok_scores.values())
            orig_tokens = factory.tokenizer.tokenize(row.query_0)
            id = row.qid
        else:
            tok_scores = row.toks
            orig_tokens = factory.tokenizer.tokenize(row.text)
            id = row.docno
        def toks2span(toks):
            return '<kbd> </kbd>'.join(f'<kbd style="background-color: rgba(66, 135, 245, {tok_scores.get(t, 0)/max_score});">{t}</kbd>' for t in toks)
        orig_tokens_set = set(orig_tokens)
        exp_tokens = [t for t, v in sorted(tok_scores.items(), key=lambda x: (-x[1], x[0])) if t not in orig_tokens_set]
        result.append(f'''
<div style="font-size: 1.2em;">{mode}: <strong>{id}</strong></div>
<div style="margin: 4px 0 16px; padding: 4px; border: 1px solid black;">
  <div>
    {toks2span(orig_tokens)}
  </div>
  <div><strong>Expansion Tokens:</strong> {toks2span(exp_tokens)}</div>
</div>
''')
    return '\n'.join(result)
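# Gradio callbacks: run the corresponding SPLADE pipeline and return the output frame,
# an equivalent stand-alone code snippet (rendered with a Colab badge), and an HTML visualisation.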
def predict_query(input):
    code = f'''import pandas as pd
import pyterrier as pt ; pt.init()
import pyt_splade
factory = pyt_splade.SpladeFactory()
query_pipeline = factory.query()
query_pipeline({df2code(input)})
'''
    res = pipe_queries(input)
    vis = generate_vis(res, mode='Query')
    return (res, code2md(code), vis)
def predict_doc(input):
    code = f'''import pandas as pd
import pyterrier as pt ; pt.init()
import pyt_splade
factory = pyt_splade.SpladeFactory()
doc_pipeline = factory.indexing()
doc_pipeline({df2code(input)})
'''
    res = pipe_docs(input)
    vis = generate_vis(res, mode='Document')
    res['toks'] = [json.dumps({k: round(v, 4) for k, v in t.items()}) for t in res['toks']]
    return (res, code2md(code), vis)
with gr.Blocks(css="table.font-mono td, table.font-mono th { white-space: pre-line; font-size: 11px; line-height: 16px; } table.font-mono td input { width: 95%; } th .cursor-pointer {display: none;} th .min-h-\[2\.3rem\] {min-height: inherit;}") as demo:
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>🐕 PyTerrier: SPLADE</h1>")
    gr.Markdown(open('README.md', 'rt').read().split('\n---\n')[-1])

    example_inp = pd.DataFrame([
        {'qid': '1112389', 'query': 'what is the county for grand rapids, mn'},
    ])
    example_out = predict_query(example_inp)
    inputs, outputs = [], []
    with gr.Row().style(equal_height=False):
        with gr.Column(scale=1):
            with gr.Tab('Pipeline Input'):
                inputs.append(gr.Dataframe(
                    headers=["qid", "query"],
                    datatype=["str", "str"],
                    col_count=(2, "fixed"),
                    row_count=1,
                    wrap=True,
                    value=example_inp,
                ))
            submit_btn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=2):
            with gr.Tab('Pipeline Output'):
                outputs.append(gr.Dataframe(
                    headers=["qid", "query", "docno", "score", "rank", "text"],
                    datatype=["str", "str", "str", "number", "number", "str"],
                    col_count=6,
                    row_count=1,
                    wrap=True,
                    value=example_out[0],
                ))
            with gr.Tab('Code'):
                outputs.append(gr.Markdown(value=example_out[1]))
            with gr.Tab('Visualisation'):
                outputs.append(gr.HTML(value=example_out[2]))
    submit_btn.click(predict_query, inputs, outputs, api_name="predict_query", scroll_to_output=True)
    gr.Markdown('''
### Document Encoding
The document encoder works similarly to the query encoder: it is a `D→D` (document rewriting, doc-to-doc) transformer, and can be used in pipelines accordingly.
It maps a document's text into a dictionary with terms from the document re-weighted and weighted expansion terms added.
<div class="pipeline">
<div class="df" title="Document Frame">D</div>
<div class="transformer" title="SPLADE Indexing Transformer">SPLADE</div>
<div class="df" title="Document Frame">D</div>
</div>
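
For example, a minimal sketch of applying the document transformer directly (the expansion terms and weights in the comment are purely illustrative; they depend on the SPLADE checkpoint):

```python
import pandas as pd
import pyterrier as pt ; pt.init()
import pyt_splade

factory = pyt_splade.SpladeFactory()
doc_pipeline = factory.indexing()

docs = pd.DataFrame([{'docno': 'd1', 'text': 'The Manhattan Project succeeded.'}])
out = doc_pipeline(docs)
# out['toks'][0] is a dict mapping terms to weights, e.g. something like
# {'manhattan': 2.1, 'project': 1.9, 'succeeded': 1.4, 'atomic': 0.8, ...}
```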
''')
    example_inp = pd.DataFrame([
        {'docno': '0', 'text': 'The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated.'},
    ])
    example_out = predict_doc(example_inp)
    inputs, outputs = [], []
    with gr.Row().style(equal_height=False):
        with gr.Column(scale=1):
            with gr.Tab("Pipeline Input"):
                inputs.append(gr.Dataframe(
                    headers=["docno", "text"],
                    datatype=["str", "str"],
                    col_count=(2, "fixed"),
                    row_count=1,
                    wrap=True,
                    value=example_inp,
                ))
            submit_btn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=2):
            with gr.Tab("Pipeline Output"):
                outputs.append(gr.Dataframe(
                    headers=["qid", "query", "docno", "score", "rank", "text"],
                    datatype=["str", "str", "str", "number", "number", "str"],
                    col_count=6,
                    row_count=1,
                    wrap=True,
                    value=example_out[0],
                ))
            with gr.Tab('Code'):
                outputs.append(gr.Markdown(value=example_out[1]))
            with gr.Tab('Visualisation'):
                outputs.append(gr.HTML(value=example_out[2]))
    submit_btn.click(predict_doc, inputs, outputs, api_name="predict_doc", scroll_to_output=True)
    gr.Markdown('''
### Putting it all together
When you use the document encoder in an indexing pipeline, the rewritten document contents are indexed:
<div class="pipeline">
<div class="df" title="Document Frame">D</div>
<div class="transformer" title="SPLADE Indexing Transformer">SPLADE</div>
<div class="df" title="Document Frame">D</div>
<div class="transformer boring" title="Indexer">Indexer</div>
</div>
```python
import pyterrier as pt
pt.init(version='snapshot')
import pyt_splade
dataset = pt.get_dataset('irds:msmarco-passage')
factory = pyt_splade.SpladeFactory()
indexer = pt.IterDictIndexer('./msmarco_psg', pretokenized=True)
indexer_pipe = factory.indexing() >> indexer
indexer_pipe.index(dataset.get_corpus_iter())
```
Once you have built an index, you can build a retrieval pipeline that first encodes the query,
and then performs retrieval:
<div class="pipeline">
<div class="df" title="Query Frame">Q</div>
<div class="transformer" title="SPLADE Query Transformer">SPLADE</div>
<div class="df" title="Query Frame">Q</div>
<div class="transformer boring" title="Term Frequency Transformer">TF Retriever</div>
<div class="df" title="Result Frame">R</div>
</div>
```python
splade_retr = factory.query() >> pt.BatchRetrieve('./msmarco_psg', wmodel='Tf')
```
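
For example, you could then run the pipeline for a single query, or evaluate it with `pt.Experiment` (the dataset split and measures below are illustrative choices, not part of this demo):

```python
# a single ad-hoc query
splade_retr.search('what is the county for grand rapids, mn')

# or a batch evaluation
dataset = pt.get_dataset('irds:msmarco-passage/dev/small')
pt.Experiment(
    [splade_retr],
    dataset.get_topics(),
    dataset.get_qrels(),
    eval_metrics=['recip_rank', 'ndcg_cut_10'],
    names=['SPLADE'],
)
```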
### References & Credits
This package uses [Naver's SPLADE repository](https://github.com/naver/splade).
- Thibault Formal, Benjamin Piwowarski, Stéphane Clinchant. [SPLADE: Sparse Lexical and Expansion Model for First Stage Ranking](https://arxiv.org/abs/2107.05720). SIGIR 2021.
- Craig Macdonald, Nicola Tonellotto, Sean MacAvaney, Iadh Ounis. [PyTerrier: Declarative Experimentation in Python from BM25 to Dense Retrieval](https://dl.acm.org/doi/abs/10.1145/3459637.3482013). CIKM 2021.
''')
demo.launch(share=False)