datacipen committed
Commit a0b1f51
1 Parent(s): e380708

Update main.py

Files changed (1)
  1. main.py +34 -26
main.py CHANGED
@@ -16,7 +16,7 @@ from langchain.schema import StrOutputParser
 from langchain.schema.runnable import Runnable
 from langchain.schema.runnable.config import RunnableConfig
 from langchain.schema.runnable import Runnable, RunnablePassthrough, RunnableLambda
-from langchain_core.prompts import PromptTemplate
+from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 
 @cl.password_auth_callback
 def auth_callback(username: str, password: str):
@@ -47,16 +47,6 @@ async def chat_profile():
         cl.ChatProfile(name="Imagestream",markdown_description="Requêter sur un ensemble d'images",icon="./public/logo-ofipe.jpg",),
     ]
 
-@cl.step(type="llm")
-async def IA():
-    os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN']
-    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-
-    llm = HuggingFaceEndpoint(
-        repo_id=repo_id, max_new_tokens=8000, temperature=1.0, task="text2text-generation", streaming=True
-    )
-    return llm
-
 @cl.on_chat_start
 async def start():
     await cl.Message(f"> REVIEWSTREAM").send()
@@ -90,18 +80,14 @@ async def start():
     {context}
     {question} [/INST] </s>
     """
-    prompt = PromptTemplate(template=template, input_variables=["question","context"])
-    #llm = await IA()
-    #chain = (
-    #    RunnablePassthrough.assign(
-    #        history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
-    #    )
-    #    | prompt | llm
-    #)
-    #cl.user_session.set("runnable", chain)
 
-@cl.on_message
-async def main(message: cl.Message):
+    os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN']
+    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+
+    llm = HuggingFaceEndpoint(
+        repo_id=repo_id, max_new_tokens=8000, temperature=1.0, task="text2text-generation", streaming=True
+    )
+
     os.environ['PINECONE_API_KEY'] = os.environ['PINECONE_API_KEY']
     embeddings = HuggingFaceEmbeddings()
     index_name = "all-venus"
@@ -109,16 +95,38 @@ async def main(message: cl.Message):
         api_key=os.environ['PINECONE_API_KEY']
     )
     index = pc.Index(index_name)
-    memory = cl.user_session.get("memory")
-    runnable = cl.user_session.get("runnable")
-
     xq = embeddings.embed_query(message.content)
     xc = index.query(vector=xq, filter={"categorie": {"$eq": "bibliographie-OPP-DGDIN"}},top_k=150, include_metadata=True)
     context_p = ""
     for result in xc['matches']:
         context_p = context_p + result['metadata']['text']
+
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                f"Contexte : Vous êtes un chercheur de l'enseignement supérieur et vous êtes doué pour faire des analyses d'articles de recherche sur les thématiques liées à la pédagogie. En fonction des informations suivantes et du contexte suivant seulement et strictement. Contexte : {context_p}, réponds à la question suivante de la manière la plus pertinente, la plus exhaustive et la plus détaillée possible. ",
+            ),
+            MessagesPlaceholder(variable_name="history"),
+            ("human", "{question}, dans le contexte fourni."),
+        ]
+    )
+    runnable = (
+        RunnablePassthrough.assign(
+            history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
+        )
+        | prompt
+        | model
+    )
+    cl.user_session.set("runnable", runnable)
+
+@cl.on_message
+async def main(message: cl.Message):
+    memory = cl.user_session.get("memory")
+    runnable = cl.user_session.get("runnable")
+
     msg = cl.Message(author="Assistant Reviewstream",content="")
-    async for chunk in runnable.astream({"question": message.content,"context":context_p},
+    async for chunk in runnable.astream({"question": message.content},
         config=RunnableConfig(callbacks=[cl.AsyncLangchainCallbackHandler(stream_final_answer=True)])):
         await msg.stream_token(chunk)
 
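Taken together, the commit deletes the `IA()` step and rebuilds everything inside `start()`. Two details stand out: the `HUGGINGFACEHUB_API_TOKEN` self-assignment is a no-op, and the endpoint call is the only part of the old `IA()` helper that survives. A minimal standalone sketch of that endpoint construction (the `langchain_community` import path is my assumption; the diff never shows the file's import block):

```python
import os
from langchain_community.llms import HuggingFaceEndpoint

# The token must already be exported; assigning os.environ to itself,
# as the diff does, changes nothing.
assert "HUGGINGFACEHUB_API_TOKEN" in os.environ

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
    max_new_tokens=8000,          # generous ceiling for long, detailed answers
    temperature=1.0,
    task="text2text-generation",
    streaming=True,               # emit tokens incrementally for the Chainlit UI
)
```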
 
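The Pinecone lookup itself is unchanged line for line, but it now runs inside `start()`, where `message` is undefined; in the parent revision it ran in `main()`, which actually receives a message. A sketch of the same lookup as a reusable helper (the name `retrieve_context` is hypothetical, not from the repo):

```python
import os
from pinecone import Pinecone
from langchain_community.embeddings import HuggingFaceEmbeddings

def retrieve_context(question: str) -> str:
    """Embed the question, fetch the top matches tagged 'bibliographie-OPP-DGDIN',
    and concatenate their stored text into one context string."""
    embeddings = HuggingFaceEmbeddings()
    pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
    index = pc.Index("all-venus")
    xq = embeddings.embed_query(question)
    xc = index.query(
        vector=xq,
        filter={"categorie": {"$eq": "bibliographie-OPP-DGDIN"}},
        top_k=150,
        include_metadata=True,
    )
    return "".join(match["metadata"]["text"] for match in xc["matches"])
```

Calling such a helper from `main()` with `message.content` would restore per-question retrieval.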
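The new chain pipes the prompt into `model`, a name no hunk defines (`llm` is what the endpoint assignment creates), and it reads `memory` inside `start()` although the session only fetches it in `main()`. It also f-string-interpolates `{context_p}` into the system message, freezing one retrieval result into the chain for the whole session. A sketch of the assembly as presumably intended, assuming `model` should be `llm` and that the context is passed as a template variable instead:

```python
from operator import itemgetter

from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain_community.llms import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", streaming=True)
memory = ConversationBufferMemory(return_messages=True)  # assumed to be what "memory" holds

# Keeping {context} as a template variable lets each question carry its own
# retrieval result instead of the one frozen in at startup.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Réponds uniquement à partir du contexte suivant : {context}"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)

runnable = (
    RunnablePassthrough.assign(
        history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
    )
    | prompt
    | llm  # the diff writes `model` here, presumably a typo for `llm`
)
```

With this shape, `main()` would call `runnable.astream({"question": ..., "context": ...})`; the commit's astream call no longer passes any context at all.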
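Finally, the handler streams whatever the session-stored chain yields. Two pieces the diff leaves out, assuming the history mechanism is meant to work: the streamed message is never finalized with `send()`, and nothing writes the exchange back into memory, so `MessagesPlaceholder(variable_name="history")` would stay empty forever. A hedged sketch of the handler with both added:

```python
import chainlit as cl
from langchain.schema.runnable.config import RunnableConfig

@cl.on_message
async def main(message: cl.Message):
    memory = cl.user_session.get("memory")        # assumes start() stored one
    runnable = cl.user_session.get("runnable")

    msg = cl.Message(author="Assistant Reviewstream", content="")
    async for chunk in runnable.astream(
        {"question": message.content},
        config=RunnableConfig(
            callbacks=[cl.AsyncLangchainCallbackHandler(stream_final_answer=True)]
        ),
    ):
        await msg.stream_token(chunk)
    await msg.send()  # not in the diff: finalizes the streamed message

    # Not in the diff either: persist the turn so "history" grows.
    memory.save_context({"question": message.content}, {"answer": msg.content})
```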