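"""RAG backend for the "IA Signature" Space.

Loads a pickled knowledge graph and pickled book chunks, builds an in-memory
FAISS vector store over the chunks, and exposes helpers to generate
comprehension-check questions ("sphinx" mode), score user answers, and stream
grounded answers about the book.
"""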
import os
import pickle
import random
import unicodedata
from uuid import uuid4

import faiss
from dotenv import load_dotenv
from itext2kg.models import KnowledgeGraph
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from pydantic import BaseModel, Field

# Wildcard import supplies the prompt templates and summary used below:
# template, template_sphinx, template_verify, summary_text
from prompt import *
load_dotenv()
index_name = os.environ.get("INDEX_NAME")
# Global initialization
embedding_model = "text-embedding-3-small"
embedding = OpenAIEmbeddings(model=embedding_model)
# vector_store = PineconeVectorStore(index=index_name, embedding=embedding)
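# The Pinecone-backed store above is disabled; an in-memory FAISS index (built
# below in get_vectorstore) is used instead, so INDEX_NAME is only needed if
# the Pinecone path is re-enabled.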
def advanced_graph_to_json(graph: KnowledgeGraph):
    """Serialize an itext2kg KnowledgeGraph into a JSON-friendly dict of nodes and edges."""
    nodes = []
    edges = []
    for node in graph.entities:
        node_id = node.name.replace(" ", "_")
        label = node.name
        node_type = node.label  # avoid shadowing the builtin `type`
        nodes.append({"id": node_id, "label": label, "type": node_type})
    for relationship in graph.relationships:
        source = relationship.startEntity
        source_id = source.name.replace(" ", "_")
        target = relationship.endEntity
        target_id = target.name.replace(" ", "_")
        label = relationship.name
        edges.append({"source": source_id, "label": label, "cible": target_id})
    # French keys ("noeuds" = nodes, "relations" = edges, "cible" = target) are
    # kept as-is in case downstream prompt templates expect them.
    return {"noeuds": nodes, "relations": edges}
with open("kg_ia_signature.pkl", "rb") as file:
loaded_graph = pickle.load(file)
graph = advanced_graph_to_json(loaded_graph)
print("Graph loaded")
with open("chunks_ia_signature.pkl", "rb") as file:
chunks = pickle.load(file)
print("Chunks loaded")
class sphinx_output(BaseModel):
    question: str = Field(description="The question to ask the user to test whether they read the entire book")
    answers: list[str] = Field(description="The possible answers to the question")


class verify_response_model(BaseModel):
    response: str = Field(description="The user's response to the question")
    answers: list[str] = Field(description="The possible answers to the question")
    initial_question: str = Field(description="The question that was asked to the user")


class verification_score(BaseModel):
    score: float = Field(description="The score of the user's response to the question, from 0 to 10")
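# Illustrative shape of the structured payload the LLM returns for sphinx_output
# (values are examples only):
#   {"question": "Quel est ... ?", "answers": ["...", "...", "..."]}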
llm = ChatOpenAI(model="gpt-4o", max_tokens=300, temperature=0.5)
def get_random_chunk() -> str:
    """Return a random chunk of the book text to seed question generation."""
    return random.choice(chunks)
def get_vectorstore() -> FAISS:
    """Build an in-memory FAISS vector store over the book chunks."""
    # Embed a probe string once so the L2 index is sized to the embedding dimension.
    index = faiss.IndexFlatL2(len(embedding.embed_query("hello world")))
    vector_store = FAISS(
        embedding_function=embedding,
        index=index,
        docstore=InMemoryDocstore(),
        index_to_docstore_id={},
    )
    documents = [Document(page_content=chunk) for chunk in chunks]
    uuids = [str(uuid4()) for _ in range(len(documents))]
    vector_store.add_documents(documents=documents, ids=uuids)
    return vector_store
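# The store is built once at import time; embedding every chunk here requires
# OpenAI API access and may take a moment for large books.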
vector_store = get_vectorstore()
def generate_sphinx_response() -> sphinx_output:
    """Generate a comprehension-check question and its candidate answers from a random excerpt."""
    writer = "Laurent Tripied"
    book_name = "Limites de l'imaginaire ou limites planétaires"
    summary = summary_text
    excerpt = get_random_chunk()
    prompt = PromptTemplate.from_template(template_sphinx)
    structured_llm = llm.with_structured_output(sphinx_output)
    # Chain the prompt into the structured-output LLM
    llm_chain = prompt | structured_llm
    return llm_chain.invoke({"writer": writer, "book_name": book_name, "summary": summary, "excerpt": excerpt})
def verify_response(response: str, answers: list[str], question: str) -> bool:
    """Score the user's answer with the LLM and pass or fail it against a threshold."""
    prompt = PromptTemplate.from_template(template_verify)
    structured_llm = llm.with_structured_output(verification_score)
    llm_chain = prompt | structured_llm
    score = llm_chain.invoke({"response": response, "answers": answers, "initial_question": question})
    # NOTE: a pass mark of 5 on the 0-10 scale is an assumption; a comparison
    # against 0 would always be true and the function would never return False.
    return score.score >= 5
def retrieve_context_from_vectorstore(query: str) -> str:
    """Retrieve the top MMR-ranked chunks for the query and join them into one context string."""
    retriever = vector_store.as_retriever(search_type="mmr", search_kwargs={"k": 3})
    # The retriever returns a list of Documents; join their text so the return
    # value matches the declared str type.
    docs = retriever.invoke(query)
    return "\n\n".join(doc.page_content for doc in docs)
def generate_stream(query: str, messages=None, model="gpt-4o-mini", max_tokens=300,
                    temperature=0.5, index_name="", stream=True, vector_store=None):
    """Answer a user query about the book; returns a token stream by default."""
    try:
        print("init chat")
        print("init template")
        prompt = PromptTemplate.from_template(template)
        writer = "Laurent Tripied"
        name_book = "Limites de l'imaginaire ou limites planétaires"
        name_icon = "Magritte"
        print("retrieving context")
        context = retrieve_context_from_vectorstore(query)
        print(f"Context: {context}")
        llm_chain = prompt | llm | StrOutputParser()
        print("streaming")
        # The prompt consumes the serialized graph (module-level `graph`).
        inputs = {"name_book": name_book, "writer": writer, "name_icon": name_icon,
                  "kg": graph, "context": context, "query": query}
        if stream:
            return llm_chain.stream(inputs)
        return llm_chain.invoke(inputs)
    except Exception as e:
        print(e)
        return False
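
# Minimal smoke test, assuming OPENAI_API_KEY is set and both .pkl artifacts
# are present. The sample query and the "yes" answer are illustrative only.
if __name__ == "__main__":
    sphinx = generate_sphinx_response()
    print(f"Question: {sphinx.question}")
    print(f"Candidate answers: {sphinx.answers}")
    print(f"Passed: {verify_response('yes', sphinx.answers, sphinx.question)}")
    tokens = generate_stream("What is the book about?")
    if tokens:  # generate_stream returns False on error
        for token in tokens:
            print(token, end="", flush=True)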