AyoubChLin's picture
[INIT]
488f910
raw
history blame
1.97 kB
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from typing import List, Dict
import os
# OpenRouter credentials/endpoint. The key is read from the environment and may
# be None if unset — no validation happens here; a missing key surfaces later
# as an auth error from the API call.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
# OpenAI-compatible base URL for the OpenRouter gateway.
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
# Default model slug on OpenRouter (":free" tier variant of Hermes 3 405B).
DEFAULT_MODEL = "nousresearch/hermes-3-llama-3.1-405b:free"
class ChatManager:
    """RAG-style answerer: formats a (context, query) pair into a fixed
    system/human prompt and sends it to an OpenRouter-hosted chat model.

    Uses the module-level OPENROUTER_API_KEY / OPENROUTER_API_BASE /
    DEFAULT_MODEL constants for the connection.
    """

    def __init__(self, temperature: float = 0.7):
        """Build the chat client and the prompt template.

        Args:
            temperature: sampling temperature forwarded to the model
                (0.7 keeps the default mildly creative).
        """
        self.chat = ChatOpenAI(
            openai_api_base=OPENROUTER_API_BASE,
            openai_api_key=OPENROUTER_API_KEY,
            model_name=DEFAULT_MODEL,
            temperature=temperature,
        )
        # System prompt: constrains the model to answer strictly from the
        # retrieved case text. Kept byte-identical — it is runtime behavior.
        self.system_message = """You are an advanced assistant designed to help users by retrieving the most relevant information from a predefined set of documents or cases and then providing an accurate response based on that data.
Your job is as follows:
1. When the user submits a query, match the query with the most relevant case from the database.
2. Extract only the denoised and contextually relevant text from that case.
3. Use that extracted text to answer the user's query with precision and clarity.
4. If the relevant text isn't found, let the user know that the information is not available or ask for more clarification.
5. Avoid providing information outside the scope of the retrieved text.
Always focus on relevance and clarity in your response, maintaining coherence with the user's original query."""
        # Human turn template; {context} and {query} are filled per call.
        self.user_message = """
Context: {context}
Query: {query}
"""
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", self.system_message),
            ("human", self.user_message),
        ])

    def get_response(self, context: str, query: str) -> str:
        """Answer `query` using only the retrieved `context` text.

        Args:
            context: the denoised case/document text to ground the answer in.
            query: the user's question.

        Returns:
            The model's reply content as a string.
        """
        prompt_value = self.prompt.invoke({
            "context": context,
            "query": query,
        })
        # Fix: `self.chat(messages)` relies on BaseChatModel.__call__, which
        # is deprecated in LangChain 0.1+ — use the Runnable `.invoke()` API.
        response = self.chat.invoke(prompt_value.to_messages())
        return response.content