# mteb-fr-reranking-alloprof-s2p / build_reranking_dataset_BM25.py
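"""Build a reranking dataset from the lyon-nlp/alloprof queries and documents:
tune BM25 hyperparameters with a grid search (scored by MRR), mine hard
negatives with the best estimator, and push the result to the Hugging Face Hub.
"""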
from rank_bm25 import BM25Plus
import datasets
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV
from huggingface_hub import create_repo
from huggingface_hub.utils import HfHubHTTPError
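
# Number of BM25 hard negatives mined per query, and the alloprof queries
# split used to build the reranking dataset.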
N_NEGATIVE_DOCS = 10
SPLIT = "test"

# Prepare documents
def create_text(example: dict) -> str:
    """Concatenate a document's title and body into a single text block."""
    return "\n".join([example["title"], example["text"]])

documents = datasets.load_dataset("lyon-nlp/alloprof", "documents")["test"]
documents = documents.map(lambda x: {"text": create_text(x)})
documents = documents.rename_column("uuid", "doc_id")
documents = documents.remove_columns(["__index_level_0__", "title", "topic"])
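# The documents corpus now has two columns: "doc_id" and "text" (title + body).
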
# Prepare queries
queries = datasets.load_dataset("lyon-nlp/alloprof", "queries")[SPLIT]
queries = queries.rename_columns({"text": "queries", "relevant": "doc_id"})
queries = queries.remove_columns(["__index_level_0__", "answer", "id", "subject"])
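# The queries set now has two columns: "queries" (the question text) and
# "doc_id" (the list of relevant document ids for each query).
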
# Optimize BM25 parameters
### Build a sklearn estimator wrapping BM25
class BM25Estimator(BaseEstimator):
    def __init__(self, corpus_dataset: datasets.Dataset, *, k1: float = 1.5, b: float = .75, delta: int = 1):
        """Initialize the BM25 estimator using the corpus dataset.

        The dataset must contain 2 columns:
            - "doc_id": the document ids
            - "text": the document texts

        Args:
            corpus_dataset (datasets.Dataset): the corpus of documents to index
            k1 (float, optional): BM25 term-frequency saturation parameter. Defaults to 1.5.
            b (float, optional): BM25 document-length normalization parameter. Defaults to .75.
            delta (int, optional): BM25+ additive bonus on term scores. Defaults to 1.
        """
self.is_fitted_ = False
self.corpus_dataset = corpus_dataset
self.k1 = k1
self.b = b
        self.delta = delta
self.bm25 = None

    def tokenize_corpus(self, corpus: list[str] | str) -> list[list[str]] | list[str]:
        """Tokenize a corpus of strings by lowercasing and splitting on whitespace.

        Args:
            corpus (list[str] | str): the list of strings (or single string) to tokenize

        Returns:
            list[list[str]] | list[str]: the tokenized corpus (or single tokenized string)
        """
        if isinstance(corpus, str):
            return corpus.lower().split()
        return [c.lower().split() for c in corpus]

    def fit(self, X=None, y=None):
        """Fit BM25 on the corpus dataset.

        The X and y arguments are placeholders required by the sklearn API.
        """
tokenized_corpus = self.tokenize_corpus(self.corpus_dataset["text"])
self.bm25 = BM25Plus(
corpus=tokenized_corpus,
k1=self.k1,
b=self.b,
delta=self.delta
)
self.is_fitted_ = True
return self

    def predict(self, query: str, topN: int = 10) -> list[str]:
        """Return the ids of the topN documents, ordered by relevance (best first).

        Args:
            query (str): the query text
            topN (int, optional): the number of document ids to return. Defaults to 10.

        Returns:
            list[str]: the ids of the topN most relevant documents
        """
if not self.is_fitted_:
self.fit()
tokenized_query = self.tokenize_corpus(query)
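        # get_top_n returns document texts; map them back to their ids
        # (this assumes document texts are unique within the corpus)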
best_docs = self.bm25.get_top_n(tokenized_query, self.corpus_dataset["text"], n=topN)
doc_text2id = dict(list(zip(self.corpus_dataset["text"], self.corpus_dataset["doc_id"])))
best_docs_ids = [doc_text2id[doc] for doc in best_docs]
return best_docs_ids

    def score(self, queries: list[str], relevant_docs: list[list[str]]) -> float:
        """Score the BM25 estimator on queries and their relevant docs, using MRR.

        Called by GridSearchCV to evaluate each parameter combination.

        Args:
            queries (list[str]): list of queries
            relevant_docs (list[list[str]]): list of relevant document ids for each query

        Returns:
            float: the MRR averaged over all queries
        """
best_docs_ids_preds = [self.predict(q, N_NEGATIVE_DOCS) for q in queries]
best_docs_isrelevant = [
[
doc in rel_docs for doc in best_docs_ids_pred
]
for best_docs_ids_pred, rel_docs in zip(best_docs_ids_preds, relevant_docs)
]
mrrs = [self._compute_mrr(preds) for preds in best_docs_isrelevant]
mrr = sum(mrrs)/len(mrrs)
return mrr

    def _compute_mrr(self, predictions: list[bool]) -> float:
        """Compute the reciprocal-rank score of a list of boolean predictions.

        Note: this averages the reciprocal ranks of all relevant positions, which
        equals the standard reciprocal rank when a single document is relevant.

        Example:
            predictions = [False, False, True, False] indicates that only the
            third retrieved document is relevant, yielding a score of 1/3.

        Args:
            predictions (list[bool]): the binarized relevancy of predictions

        Returns:
            float: the averaged reciprocal rank, or 0 if no prediction is relevant
        """
        if any(predictions):
            reciprocal_ranks = [1 / (i + 1) for i, pred in enumerate(predictions) if pred]
            return sum(reciprocal_ranks) / len(reciprocal_ranks)
        return 0.
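
# Illustrative standalone usage (the query string below is a made-up example):
#   est = BM25Estimator(documents).fit()
#   top_doc_ids = est.predict("Comment additionner des fractions ?", topN=5)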

### Perform a grid search to find the best parameters for BM25
print("Optimizing BM25 parameters...")
params = {
    "k1": [1.25, 1.5, 1.75],
    "b": [.5, .75, 1.],
    "delta": [0, 1],
}
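# 3 x 3 x 2 = 18 parameter combinations; with GridSearchCV's default 5-fold
# cross-validation, this amounts to 90 fits.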
gscv = GridSearchCV(BM25Estimator(documents), params, verbose=1)
gscv.fit(queries["queries"], queries["doc_id"])
print("Best parameterss :", gscv.best_params_)
print("Best MRR score :", gscv.best_score_)

# Build the reranking dataset with positive and negative documents using the best estimator
print("Generating reranking dataset...")
reranking_dataset = datasets.Dataset.from_dict(
    {
        "query": queries["queries"],
        "positive": queries["doc_id"],
        # Use gscv.best_estimator_ (refit with the best parameters); the
        # `estimator` attribute is only the unfitted template.
        "negative": [
            [doc_id for doc_id in gscv.best_estimator_.predict(q, N_NEGATIVE_DOCS) if doc_id not in relevant_ids]
            for q, relevant_ids in zip(queries["queries"], queries["doc_id"])
        ],
    }
)
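# Each row pairs a query with its relevant doc ids ("positive") and the
# BM25-mined non-relevant doc ids ("negative").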

# Push dataset to hub
### create HF repo
repo_id = "lyon-nlp/mteb-fr-reranking-alloprof-s2p"
try:
    create_repo(repo_id, repo_type="dataset")
except HfHubHTTPError:
    print("HF repo already exists")
### push to hub
reranking_dataset.push_to_hub(repo_id, config_name="queries", split=SPLIT)
documents.push_to_hub(repo_id, config_name="documents", split="test")