datacipen committed on
Commit
eefba2d
1 Parent(s): 0991434

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +27 -2
main.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import json
3
  import bcrypt
 
4
  from typing import List
5
  from pathlib import Path
6
  from langchain_huggingface import HuggingFaceEmbeddings
@@ -16,7 +17,7 @@ from langchain.memory import ConversationBufferMemory
16
  from langchain.schema.runnable import Runnable, RunnablePassthrough, RunnableConfig, RunnableLambda
17
  from langchain.callbacks.base import BaseCallbackHandler
18
  from langchain.chains import (
19
- StuffDocumentsChain, ConversationalRetrievalChain
20
  )
21
 
22
  import chainlit as cl
@@ -52,6 +53,29 @@ async def Retriever(categorie):
52
  retriever = vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .7, "k": 250,"filter": {"title": {"$eq": "videos-table-rondeia"}, "time": {"$gte": 1320}}})
53
  return retriever
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  @cl.step(type="embedding")
56
  async def Search(input, categorie):
57
  vectorstore = await VectorDatabase(categorie)
@@ -167,7 +191,8 @@ async def on_message(message: cl.Message):
167
 
168
  await cl.Message(content=GoogleTranslator(source='auto', target='fr').translate(answer)).send()
169
 
170
- #search = vectorstore.similarity_search(message.content,k=50, filter={"categorie": {"$eq": "bibliographie-OPP-DGDIN"}})
 
171
  search = await Search(message.content, "videosTC")
172
 
173
  sources = [
 
1
  import os
2
  import json
3
  import bcrypt
4
+ import pandas as pd
5
  from typing import List
6
  from pathlib import Path
7
  from langchain_huggingface import HuggingFaceEmbeddings
 
17
  from langchain.schema.runnable import Runnable, RunnablePassthrough, RunnableConfig, RunnableLambda
18
  from langchain.callbacks.base import BaseCallbackHandler
19
  from langchain.chains import (
20
+ StuffDocumentsChain, ConversationalRetrievalChain, create_extraction_chain
21
  )
22
 
23
  import chainlit as cl
 
53
  retriever = vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .7, "k": 250,"filter": {"title": {"$eq": "videos-table-rondeia"}, "time": {"$gte": 1320}}})
54
  return retriever
55
 
56
@cl.step(type="tool")
async def OtherRequest(answer):
    """Suggest follow-up questions derived from the model's answer.

    Translates *answer* to French, runs an LLM extraction chain to pull
    out context-related questions, de-duplicates them, and sends them to
    the Chainlit UI as a list of ``cl.Starter`` suggestions.

    Args:
        answer: the raw answer text produced by the main QA chain.
    """
    # Extraction schema for create_extraction_chain: a single string
    # property. NOTE: the French key is part of the runtime schema and of
    # the extracted records below — it must NOT be translated or renamed.
    schema = {
        "properties": {
            "Questions en relation avec le contexte": {"type": "string"},
        },
        "required": ["Questions en relation avec le contexte"],
    }
    llm = await LLMistral()
    chainExtraction = create_extraction_chain(schema, llm)
    dataframe = chainExtraction.invoke(
        GoogleTranslator(source='auto', target='fr').translate(answer)
    )
    # assumes the chain's output dict carries the extracted records under
    # the 'text' key — TODO confirm against the installed langchain version.
    actRequest = dataframe['text']
    df_actRequest = pd.DataFrame(actRequest)
    allRequest = pd.DataFrame(
        df_actRequest['Questions en relation avec le contexte']
    )
    # Keep only the first occurrence of each extracted question.
    allRequest.drop_duplicates(keep='first', inplace=True)
    allRequestArray = allRequest.values.tolist()
    # One clickable starter per unique question. Comprehension replaces the
    # original manual append loop; the leftover debug print() was removed.
    RequestArray = [
        cl.Starter(label=act[0], message=act[0], icon="/public/request-theme.svg")
        for act in allRequestArray
    ]
    await cl.Message(content=RequestArray).send()
78
+
79
  @cl.step(type="embedding")
80
  async def Search(input, categorie):
81
  vectorstore = await VectorDatabase(categorie)
 
191
 
192
  await cl.Message(content=GoogleTranslator(source='auto', target='fr').translate(answer)).send()
193
 
194
+ await OtherRequest(answer)
195
+
196
  search = await Search(message.content, "videosTC")
197
 
198
  sources = [