from haystack.document_stores.faiss import FAISSDocumentStore
from haystack.nodes.retriever import EmbeddingRetriever
from haystack.nodes.ranker import BaseRanker
from haystack.pipelines import Pipeline
from haystack.document_stores.base import BaseDocumentStore
from haystack.schema import Document
from typing import Optional, List
from huggingface_hub import get_inference_endpoint
from datasets import load_dataset
from time import perf_counter

import gradio as gr
import numpy as np
import requests
import os

RETRIEVER_URL = os.getenv("RETRIEVER_URL")
RANKER_URL = os.getenv("RANKER_URL")
HF_TOKEN = os.getenv("HF_TOKEN")

# Handles to the two Inference Endpoints, used below to check their status
# and resume them on demand.
RETRIEVER_IE = get_inference_endpoint(
    "fastrag-retriever", namespace="optimum-intel", token=HF_TOKEN
)
RANKER_IE = get_inference_endpoint(
    "fastrag-ranker", namespace="optimum-intel", token=HF_TOKEN
)


def post(url, payload):
    # Send an authenticated JSON request to an Inference Endpoint.
    response = requests.post(
        url,
        json=payload,
        headers={"Authorization": f"Bearer {HF_TOKEN}"},
    )
    return response.json()


def method_timer(method):
    # Decorator that logs how long each pipeline method call takes.
    def timed(self, *args, **kw):
        start_time = perf_counter()
        result = method(self, *args, **kw)
        end_time = perf_counter()
        print(
            f"{self.__class__.__name__}.{method.__name__} took {end_time - start_time} seconds"
        )
        return result

    return timed


class Retriever(EmbeddingRetriever):
    # Deliberately skips EmbeddingRetriever.__init__ so no local model is
    # loaded; embeddings are computed by the remote retriever endpoint instead.
    def __init__(
        self,
        document_store: Optional[BaseDocumentStore] = None,
        top_k: int = 10,
        batch_size: int = 32,
        scale_score: bool = True,
    ):
        self.document_store = document_store
        self.top_k = top_k
        self.batch_size = batch_size
        self.scale_score = scale_score

    @method_timer
    def embed_queries(self, queries: List[str]) -> np.ndarray:
        payload = {"queries": queries, "inputs": ""}
        response = post(RETRIEVER_URL, payload)
        if "error" in response:
            raise gr.Error(response["error"])
        return np.array(response)

    @method_timer
    def embed_documents(self, documents: List[Document]) -> np.ndarray:
        # Strip local embeddings so the payload stays small and JSON-serializable.
        documents = [d.to_dict() for d in documents]
        for doc in documents:
            doc["embedding"] = None
        payload = {"documents": documents, "inputs": ""}
        response = post(RETRIEVER_URL, payload)
        if "error" in response:
            raise gr.Error(response["error"])
        return np.array(response)


class Ranker(BaseRanker):
    @method_timer
    def predict(
        self, query: str, documents: List[Document], top_k: Optional[int] = None
    ) -> List[Document]:
        documents = [d.to_dict() for d in documents]
        for doc in documents:
            doc["embedding"] = None
        payload = {"query": query, "documents": documents, "top_k": top_k, "inputs": ""}
        response = post(RANKER_URL, payload)
        if "error" in response:
            raise gr.Error(response["error"])
        return [Document.from_dict(d) for d in response]

    @method_timer
    def predict_batch(
        self,
        queries: List[str],
        documents: List[List[Document]],
        batch_size: Optional[int] = None,
        top_k: Optional[int] = None,
    ) -> List[List[Document]]:
        documents = [[d.to_dict() for d in docs] for docs in documents]
        for docs in documents:
            for doc in docs:
                doc["embedding"] = None
        payload = {
            "queries": queries,
            "documents": documents,
            "batch_size": batch_size,
            "top_k": top_k,
            "inputs": "",
        }
        response = post(RANKER_URL, payload)
        if "error" in response:
            raise gr.Error(response["error"])
        return [[Document.from_dict(d) for d in docs] for docs in response]


TOP_K = 2
BATCH_SIZE = 16

# Reuse the FAISS index from the Space's persistent storage if all of its
# files are present; otherwise rebuild it from scratch.
if (
    os.path.exists("/data/faiss_document_store.db")
    and os.path.exists("/data/faiss_index.json")
    and os.path.exists("/data/faiss_index")
):
    document_store = FAISSDocumentStore.load("/data/faiss_index")
    retriever = Retriever(
        document_store=document_store, top_k=TOP_K, batch_size=BATCH_SIZE
    )
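    # Saving right after a successful load presumably keeps the serialized index
    # files in sync with the SQLite document store on the /data volume.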
    document_store.save(index_path="/data/faiss_index")
else:
    # Remove any partial leftovers before rebuilding the index from scratch.
    try:
        os.remove("/data/faiss_index")
        os.remove("/data/faiss_index.json")
        os.remove("/data/faiss_document_store.db")
    except FileNotFoundError:
        pass

    document_store = FAISSDocumentStore(
        sql_url="sqlite:////data/faiss_document_store.db",
        return_embedding=True,
        embedding_dim=384,
    )
    DATASET = load_dataset("bilgeyucel/seven-wonders", split="train")
    document_store.write_documents(DATASET)
    retriever = Retriever(
        document_store=document_store, top_k=TOP_K, batch_size=BATCH_SIZE
    )
    document_store.update_embeddings(retriever=retriever)
    document_store.save(index_path="/data/faiss_index")

ranker = Ranker()

# Query -> Retriever -> Ranker
pipe = Pipeline()
pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
pipe.add_node(component=ranker, name="Ranker", inputs=["Retriever"])


def run(query: str) -> str:
    # If either endpoint is not running (e.g. paused or scaled to zero), ask it
    # to resume and tell the user to retry instead of failing with a timeout.
    if RETRIEVER_IE.status != "running":
        RETRIEVER_IE.resume()
        raise gr.Error(
            "Retriever Inference Endpoint is not running. "
            "Sent a request to resume it. Please try again in a few minutes."
        )
    if RANKER_IE.status != "running":
        RANKER_IE.resume()
        raise gr.Error(
            "Ranker Inference Endpoint is not running. "
            "Sent a request to resume it. Please try again in a few minutes."
        )

    pipe_output = pipe.run(query=query)

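    # Build an HTML report of the results; gr.components.HTML renders it below.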
    output = f"""
<h2>Query</h2>
<p>{query}</p>
<h2>Top {TOP_K} Documents</h2>
"""

    for i, doc in enumerate(pipe_output["documents"]):
        output += f"""
<h3>Document {i + 1}</h3>
<p>ID: {doc.id}</p>
<p>Score: {doc.score}</p>
<p>Content: {doc.content}</p>
"""
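
    # Document scores are the relevance scores returned by the ranker endpoint.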
    return output


examples = [
    "Where is Gardens of Babylon?",
    "Why did people build Great Pyramid of Giza?",
    "What does Rhodes Statue look like?",
    "Why did people visit the Temple of Artemis?",
    "What is the importance of Colossus of Rhodes?",
    "What happened to the Tomb of Mausolus?",
    "How did Colossus of Rhodes collapse?",
]

input_text = gr.components.Textbox(
    label="Query",
    placeholder="Enter a query",
    value=examples[0],
    lines=3,
)
output_html = gr.components.HTML(label="Results")

gr.Interface(
    fn=run,
    inputs=input_text,
    outputs=output_html,
    title="End-to-End Retrieval & Ranking",
    examples=examples,
    description="A [haystack](https://haystack.deepset.ai/) pipeline for retrieving and ranking "
    "documents from the [seven-wonders dataset](https://huggingface.co/datasets/bilgeyucel/seven-wonders) based on a query, "
    "using a FAISS database as a document store (kept in the Space's persistent storage) "
    "and two [Inference Endpoints for the Retriever and Ranker](https://huggingface.co/collections/optimum-intel/fast-rag-inference-endpoints-6641c6cbb98ddf3fe49c7728).",
).launch()