import os
import pickle
import random
from uuid import uuid4

import faiss
from dotenv import load_dotenv
from itext2kg.models import KnowledgeGraph
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from pydantic import BaseModel, Field

from prompt import (
    summary_text,
    template,
    template_sphinx,
    template_verify,
    template_whatif,
    template_whatif_response,
)

load_dotenv()
index_name = os.environ.get("INDEX_NAME")

# Global initialization
embedding_model = "text-embedding-3-small"
embedding = OpenAIEmbeddings(model=embedding_model)
# vector_store = PineconeVectorStore(index=index_name, embedding=embedding)

def advanced_graph_to_json(graph: KnowledgeGraph):
    """Flatten an itext2kg KnowledgeGraph into a JSON-serializable dict."""
    nodes = []
    edges = []
    for node in graph.entities:
        node_id = node.name.replace(" ", "_")
        nodes.append({"id": node_id, "label": node.name, "type": node.label})
    for relationship in graph.relationships:
        source_id = relationship.startEntity.name.replace(" ", "_")
        target_id = relationship.endEntity.name.replace(" ", "_")
        edges.append({"source": source_id, "label": relationship.name, "cible": target_id})
    # The keys "noeuds", "relations" and "cible" (French for nodes, relations,
    # target) are kept verbatim: downstream consumers expect this exact shape.
    return {"noeuds": nodes, "relations": edges}
with open("kg_ia_signature.pkl", "rb") as file:
loaded_graph = pickle.load(file)
graph = advanced_graph_to_json(loaded_graph)
print("Graph loaded")
with open("chunks_ia_signature.pkl", "rb") as file:
chunks = pickle.load(file)
print("Chunks loaded")
with open("scenes.pkl", "rb") as file:
scenes = pickle.load(file)
print("Scenes loaded")

class SphinxOutput(BaseModel):
    question: str = Field(description="The question to ask the user to test whether they read the entire book")
    answers: list[str] = Field(description="The possible answers to that question")


class VerifyResponseModel(BaseModel):
    response: str = Field(description="The user's response to the question")
    answers: list[str] = Field(description="The possible answers to the question")
    initial_question: str = Field(description="The question that was asked to the user")


class VerificationScore(BaseModel):
    score: float = Field(description="The score, from 0 to 10, of the user's response to the question")

llm = ChatOpenAI(model="gpt-4o", max_tokens=700, temperature=0.5)

def split_texts(text: str) -> list[str]:
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
        is_separator_regex=False,
    )
    return splitter.split_text(text)
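
# A minimal sketch of the chunking behaviour (input is illustrative): with
# chunk_size=1000 and chunk_overlap=200, consecutive chunks share up to ~200
# characters, so a sentence cut at a boundary still appears whole in one chunk.
#   parts = split_texts("phrase. " * 400)
#   assert all(len(p) <= 1000 for p in parts)  # holds for space-separated input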

#########################################################################
### HERE: CHOOSE A SPECIFIC SCENE VIA THE FUNCTION'S DEFAULT ARGUMENT
def get_random_chunk(scene_specific=[2]):  # scene_specific = None means the whole story is considered
    if scene_specific:
        # Concatenate the selected scenes (1-indexed) and re-chunk them.
        scene_specific_content = " ".join(scenes[i - 1] for i in scene_specific)
        chunks_scene = split_texts(scene_specific_content)
        print(f"Scene {scene_specific} has {len(chunks_scene)} chunks")
        print([chunk[:50] for chunk in chunks_scene])
        print("---")
        chunk_chosen = random.choice(chunks_scene)
        print(f"Chosen chunk: {chunk_chosen}")
        return chunk_chosen, scene_specific
    return random.choice(chunks), scene_specific
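
# Example call: the default draws from scene 2 only; pass None (or an empty
# list) to draw from the whole story instead.
#   excerpt, scenes_used = get_random_chunk(scene_specific=None)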

def get_vectorstore() -> FAISS:
    # Size the FAISS index from the embedding dimensionality of a probe query.
    index = faiss.IndexFlatL2(len(embedding.embed_query("hello world")))
    vector_store = FAISS(
        embedding_function=embedding,
        index=index,
        docstore=InMemoryDocstore(),
        index_to_docstore_id={},
    )
    documents = [Document(page_content=chunk) for chunk in chunks]
    uuids = [str(uuid4()) for _ in range(len(documents))]
    vector_store.add_documents(documents=documents, ids=uuids)
    return vector_store

vector_store = get_vectorstore()
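
# Quick sanity check one could run (the query string is just an example):
# FAISS returns the k stored chunks whose embeddings are nearest in L2 distance.
#   hits = vector_store.similarity_search("limites planétaires", k=2)
#   print([h.page_content[:60] for h in hits])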

def generate_sphinx_response() -> SphinxOutput:
    writer = "Laurent Tripied"
    book_name = "Limites de l'imaginaire ou limites planétaires"
    summary = summary_text
    excerpt, scene_number = get_random_chunk()
    if scene_number:
        summary = "scene " + str(scene_number)
    prompt = PromptTemplate.from_template(template_sphinx)
    structured_llm = llm.with_structured_output(SphinxOutput)
    # Chain the prompt into the structured-output LLM
    llm_chain = prompt | structured_llm
    return llm_chain.invoke({"writer": writer, "book_name": book_name, "summary": summary, "excerpt": excerpt})

#############################################################
### HERE: CHOOSE HOW STRICT THE VERIFICATION IS
def verify_response(response: str, answers: list[str], question: str) -> bool:
    prompt = PromptTemplate.from_template(template_verify)
    structured_llm = llm.with_structured_output(VerificationScore)
    llm_chain = prompt | structured_llm
    score = llm_chain.invoke({"response": response, "answers": answers, "initial_question": question})
    # A score of 5/10 or higher counts as a pass; return False explicitly
    # instead of falling through to an implicit None.
    return score.score >= 5
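
# Usage sketch, continuing the example above ("my answer" is a placeholder):
#   passed = verify_response("my answer", quiz.answers, quiz.question)
# The `score.score >= 5` threshold above is the knob for strictness.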

def retrieve_context_from_vectorstore(query: str) -> list[Document]:
    # MMR search balances relevance and diversity among the k retrieved chunks.
    # Note: this returns Document objects; they are stringified when
    # interpolated into the prompt templates below.
    retriever = vector_store.as_retriever(search_type="mmr", search_kwargs={"k": 3})
    return retriever.invoke(query)
def generate_stream(query:str,messages = [], model = "gpt-4o-mini", max_tokens = 300, temperature = 1,index_name="",stream=True,vector_store=None):
try:
print("init chat")
print("init template")
prompt = PromptTemplate.from_template(template)
writer = "Laurent Tripied"
name_book = "Limites de l'imaginaire ou limites planétaires"
name_icon = "Magritte"
kg = loaded_graph
print("retreiving context")
context = retrieve_context_from_vectorestore(query)
print(f"Context: {context}")
llm_chain = prompt | llm | StrOutputParser()
print("streaming")
if stream:
return llm_chain.stream({"name_book":name_book,"writer":writer,"name_icon":name_icon,"kg":graph,"context":context,"query":query})
else:
return llm_chain.invoke({"name_book":name_book,"writer":writer,"name_icon":name_icon,"kg":graph,"context":context,"query":query})
except Exception as e:
print(e)
return False

def generate_whatif_stream(question: str, response: str, stream: bool = False) -> str:
    try:
        prompt = PromptTemplate.from_template(template_whatif)
        llm_chain = prompt | llm | StrOutputParser()
        print("Enter whatif")
        context = retrieve_context_from_vectorstore(f"{question} {response}")
        print(f"Context: {context}")
        payload = {"question": question, "response": response, "context": context}
        if stream:
            return llm_chain.stream(payload)
        return llm_chain.invoke(payload)
    except Exception as e:
        print(e)
        return False
def generate_stream_whatif_chat(query:str,messages = [], model = "gpt-4o-mini", max_tokens = 500, temperature = 1,index_name="",stream=True,vector_store=None):
try:
print("init chat")
print("init template")
prompt = PromptTemplate.from_template(template_whatif_response)
writer = "Laurent Tripied"
name_book = "Limites de l'imaginaire ou limites planétaires"
print("retreiving context")
context = retrieve_context_from_vectorestore(query)
print(f"Context: {context}")
llm_chain = prompt | llm | StrOutputParser()
print("streaming")
if stream:
return llm_chain.stream({"name_book":name_book,"writer":writer,"messages":messages,"context":context,"query":query})
else:
return llm_chain.invoke({"name_book":name_book,"writer":writer,"messages":messages,"context":context,"query":query})
except Exception as e:
print(e)
return False |
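

if __name__ == "__main__":
    # Minimal smoke test, assuming OPENAI_API_KEY is set and the .pkl files
    # loaded above are present; the query string is only an example.
    out = generate_stream("Qui est Magritte ?")
    if out is not False:  # generate_stream returns False on error
        for token in out:
            print(token, end="", flush=True)
        print()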