Ritesh-hf committed on
Commit
291a319
·
verified ·
1 Parent(s): 0955314

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -9
app.py CHANGED
@@ -86,12 +86,12 @@ retriever = PineconeHybridSearchRetriever(
86
  llm = ChatPerplexity(temperature=0, pplx_api_key=GROQ_API_KEY, model="llama-3.1-sonar-large-128k-chat", max_tokens=512, max_retries=2)
87
 
88
  # Initialize Reranker
89
- # model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
90
- # compressor = CrossEncoderReranker(model=model, top_n=10)
91
 
92
- # compression_retriever = ContextualCompressionRetriever(
93
- # base_compressor=compressor, base_retriever=retriever
94
- # )
95
  # from langchain.retrievers.document_compressors import LLMChainExtractor
96
 
97
  # compressor = LLMChainExtractor.from_llm(llm)
@@ -100,9 +100,11 @@ llm = ChatPerplexity(temperature=0, pplx_api_key=GROQ_API_KEY, model="llama-3.1-
100
  # )
101
 
102
  # compressor = FlashrankRerank(top_n=10)
103
- # compression_retriever = ContextualCompressionRetriever(
104
- # base_compressor=compressor, base_retriever=retriever
105
- # )
 
 
106
 
107
  # Contextualization prompt and retriever
108
  contextualize_q_system_prompt = """ Given a chat history and the latest user question \
@@ -117,7 +119,7 @@ contextualize_q_prompt = ChatPromptTemplate.from_messages(
117
  ("human", "{input}")
118
  ]
119
  )
120
- history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
121
 
122
  # QA system prompt and chain
123
  qa_system_prompt = """ You are a highly skilled information retrieval assistant. Use the following context to answer questions effectively.
 
86
  llm = ChatPerplexity(temperature=0, pplx_api_key=GROQ_API_KEY, model="llama-3.1-sonar-large-128k-chat", max_tokens=512, max_retries=2)
87
 
88
  # Initialize Reranker
89
+ model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
90
+ compressor = CrossEncoderReranker(model=model, top_n=10)
91
 
92
+ compression_retriever = ContextualCompressionRetriever(
93
+ base_compressor=compressor, base_retriever=retriever
94
+ )
95
  # from langchain.retrievers.document_compressors import LLMChainExtractor
96
 
97
  # compressor = LLMChainExtractor.from_llm(llm)
 
100
  # )
101
 
102
  # compressor = FlashrankRerank(top_n=10)
103
+
104
+
105
+ compression_retriever = ContextualCompressionRetriever(
106
+ base_compressor=compressor, base_retriever=retriever
107
+ )
108
 
109
  # Contextualization prompt and retriever
110
  contextualize_q_system_prompt = """ Given a chat history and the latest user question \
 
119
  ("human", "{input}")
120
  ]
121
  )
122
+ history_aware_retriever = create_history_aware_retriever(llm, compression_retriever, contextualize_q_prompt)
123
 
124
  # QA system prompt and chain
125
  qa_system_prompt = """ You are a highly skilled information retrieval assistant. Use the following context to answer questions effectively.