from uuid import uuid4
import os
import pickle
import random
import unicodedata

import faiss
from dotenv import load_dotenv
from itext2kg.models import KnowledgeGraph
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from pydantic import BaseModel, Field

# prompt.py provides the templates and summary used below
# (template, template_sphinx, template_verify, summary_text).
from prompt import *

load_dotenv()
index_name = os.environ.get("INDEX_NAME")

# Global initialization
embedding_model = "text-embedding-3-small"
embedding = OpenAIEmbeddings(model=embedding_model)
# vector_store = PineconeVectorStore(index=index_name, embedding=embedding)


def advanced_graph_to_json(graph: KnowledgeGraph):
    """Flatten an itext2kg KnowledgeGraph into a JSON-serialisable dict."""
    nodes = []
    edges = []
    for node in graph.entities:
        node_id = node.name.replace(" ", "_")
        nodes.append({"id": node_id, "label": node.name, "type": node.label})
    for relationship in graph.relationships:
        source_id = relationship.startEntity.name.replace(" ", "_")
        target_id = relationship.endEntity.name.replace(" ", "_")
        # The French keys ("cible" = target, "noeuds" = nodes) are kept
        # as-is since downstream consumers of this dict expect them.
        edges.append({"source": source_id, "label": relationship.name, "cible": target_id})
    return {"noeuds": nodes, "relations": edges}
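
# Illustrative shape of the returned dict (entity and relation names here
# are invented, not taken from the real graph):
# {
#     "noeuds": [{"id": "Rene_Magritte", "label": "Rene Magritte", "type": "Person"}],
#     "relations": [{"source": "Rene_Magritte", "label": "inspires", "cible": "IA_Signature"}],
# }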

with open("kg_ia_signature.pkl", "rb") as file:
    loaded_graph = pickle.load(file)
graph = advanced_graph_to_json(loaded_graph)
print("Graph loaded")

with open("chunks_ia_signature.pkl", "rb") as file:
    chunks = pickle.load(file)
print("Chunks loaded")


class sphinx_output(BaseModel):
    question: str = Field(description="The question to ask the user to test whether they read the entire book")
    answers: list[str] = Field(description="The possible answers to that question")


class verify_response_model(BaseModel):
    response: str = Field(description="The user's response to the question")
    answers: list[str] = Field(description="The possible answers to the question")
    initial_question: str = Field(description="The question that was asked to the user")


class verification_score(BaseModel):
    score: float = Field(description="The score of the user's response to the question, from 0 to 10")


llm = ChatOpenAI(model="gpt-4o", max_tokens=300, temperature=0.5)


def get_random_chunk() -> str:
    """Return a random excerpt from the pickled book chunks."""
    return random.choice(chunks)


def get_vectorstore() -> FAISS:
    """Build an in-memory FAISS index over the book chunks."""
    # Embed a probe string once to discover the embedding dimensionality.
    index = faiss.IndexFlatL2(len(embedding.embed_query("hello world")))
    vector_store = FAISS(
        embedding_function=embedding,
        index=index,
        docstore=InMemoryDocstore(),
        index_to_docstore_id={},
    )
    documents = [Document(page_content=chunk) for chunk in chunks]
    uuids = [str(uuid4()) for _ in documents]
    vector_store.add_documents(documents=documents, ids=uuids)
    return vector_store


vector_store = get_vectorstore()
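# Illustrative sanity check of the freshly built index (query text made up):
# vector_store.similarity_search("limites planétaires", k=2)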


def generate_sphinx_response() -> sphinx_output:
    """Generate a comprehension-check question about the book, with candidate answers."""
    writer = "Laurent Tripied"
    book_name = "Limites de l'imaginaire ou limites planétaires"
    summary = summary_text  # imported from prompt.py
    excerpt = get_random_chunk()
    prompt = PromptTemplate.from_template(template_sphinx)
    structured_llm = llm.with_structured_output(sphinx_output)
    # Chain the prompt into the structured-output LLM
    llm_chain = prompt | structured_llm
    return llm_chain.invoke({"writer": writer, "book_name": book_name, "summary": summary, "excerpt": excerpt})


def verify_response(response: str, answers: list[str], question: str) -> bool:
    """Score the user's answer against the expected answers and decide pass/fail."""
    prompt = PromptTemplate.from_template(template_verify)
    structured_llm = llm.with_structured_output(verification_score)
    llm_chain = prompt | structured_llm
    score = llm_chain.invoke({"response": response, "answers": answers, "initial_question": question})
    # The original `score.score >= 0` check always passed, since scores range
    # from 0 to 10; a midpoint pass threshold is assumed here instead.
    return score.score >= 5
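
# Hypothetical round-trip (user_reply would come from the UI):
# quiz = generate_sphinx_response()
# passed = verify_response(user_reply, quiz.answers, quiz.question)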


def retrieve_context_from_vectorstore(query: str) -> str:
    """Retrieve the top MMR-ranked chunks and join them into one context string."""
    retriever = vector_store.as_retriever(search_type="mmr", search_kwargs={"k": 3})
    docs = retriever.invoke(query)
    # The retriever returns Documents, not a str; join their contents so the
    # annotated return type holds.
    return "\n\n".join(doc.page_content for doc in docs)


def generate_stream(query: str, messages=None, model="gpt-4o-mini", max_tokens=300,
                    temperature=0.5, index_name="", stream=True, vector_store=None):
    """Answer a question about the book, as a token stream or a single string.

    messages, model, max_tokens, temperature, index_name and vector_store are
    currently unused: the module-level `llm` and FAISS store are used instead.
    """
    try:
        prompt = PromptTemplate.from_template(template)
        writer = "Laurent Tripied"
        name_book = "Limites de l'imaginaire ou limites planétaires"
        name_icon = "Magritte"
        print("retrieving context")
        context = retrieve_context_from_vectorstore(query)
        print(f"Context: {context}")
        llm_chain = prompt | llm | StrOutputParser()
        inputs = {"name_book": name_book, "writer": writer, "name_icon": name_icon,
                  "kg": graph, "context": context, "query": query}
        print("streaming")
        if stream:
            return llm_chain.stream(inputs)
        return llm_chain.invoke(inputs)
    except Exception as e:
        print(e)
        return False
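

# Minimal smoke test: a sketch assuming OPENAI_API_KEY is set in the
# environment and the two .pkl files sit next to this module; the query
# string is just an example.
if __name__ == "__main__":
    quiz = generate_sphinx_response()
    print(quiz.question)
    print(quiz.answers)
    for token in generate_stream("What does the book say about planetary limits?"):
        print(token, end="", flush=True)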