|
import os |
|
import json |
|
import bcrypt |
|
import pandas as pd |
|
import numpy as np |
|
from typing import List |
|
from pathlib import Path |
|
from langchain_huggingface import HuggingFaceEndpoint |
|
from langchain.schema.runnable.config import RunnableConfig |
|
from langchain.schema import StrOutputParser |
|
from langchain_anthropic import AnthropicLLM, ChatAnthropic |
|
from langchain_core.prompts import ChatPromptTemplate |
|
|
|
from langchain.agents import AgentExecutor |
|
from langchain.agents.agent_types import AgentType |
|
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent, create_csv_agent |
|
|
|
from pandasai.llm import OpenAI |
|
from pandasai import SmartDataframe |
|
import chainlit as cl |
|
from chainlit.input_widget import TextInput, Select, Switch, Slider |
|
|
|
from deep_translator import GoogleTranslator |
|
|
|
@cl.password_auth_callback
def auth_callback(username: str, password: str):
    """Authenticate a user against the JSON credential list in CHAINLIT_AUTH_LOGIN.

    The env var holds a JSON array of records: {"ident": ..., "pwd": ..., "role": ...}.
    Returns a cl.User on success, or None to reject the login.
    """
    auth = json.loads(os.environ['CHAINLIT_AUTH_LOGIN'])
    # Single scan with a default: an unknown username must fail the login,
    # not raise StopIteration (the original next(...) crashed on unknown users).
    record = next((d for d in auth if d['ident'] == username), None)
    if record is None:
        return None
    # bcrypt.checkpw re-hashes the candidate with the salt embedded in the stored
    # hash, so hashing the reference value on the fly makes this a salted,
    # constant-time equality check between the submitted and stored credentials.
    ident_ok = bcrypt.checkpw(
        username.encode('utf-8'),
        bcrypt.hashpw(record['ident'].encode('utf-8'), bcrypt.gensalt()),
    )
    pwd_ok = bcrypt.checkpw(
        password.encode('utf-8'),
        bcrypt.hashpw(record['pwd'].encode('utf-8'), bcrypt.gensalt()),
    )
    if not (ident_ok and pwd_ok):
        return None
    role = record['role']
    if role == "admindatapcc":
        return cl.User(
            identifier=record['ident'] + " : 🧑💼 Admin Datapcc", metadata={"role": "admin", "provider": "credentials"}
        )
    if role == "userdatapcc":
        return cl.User(
            identifier=record['ident'] + " : 🧑🎓 User Datapcc", metadata={"role": "user", "provider": "credentials"}
        )
    # Unknown role: reject explicitly (original fell through returning None implicitly).
    return None
|
|
|
@cl.step(type="tool")
async def LLMistral():
    """Build a streaming HuggingFace endpoint for Mixtral-8x7B-Instruct.

    Returns a configured HuggingFaceEndpoint; requires HUGGINGFACEHUB_API_TOKEN
    to be set in the environment.

    Raises:
        KeyError: if HUGGINGFACEHUB_API_TOKEN is not set.
    """
    # Fail fast with a clear error if the token is missing (the original did a
    # no-op self-assignment that only raised KeyError as a side effect).
    if 'HUGGINGFACEHUB_API_TOKEN' not in os.environ:
        raise KeyError('HUGGINGFACEHUB_API_TOKEN')
    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    llm = HuggingFaceEndpoint(
        repo_id=repo_id, max_new_tokens=5300, temperature=0.1, task="text2text-generation", streaming=True
    )
    return llm
|
|
|
@cl.step(type="tool")
async def LLM():
    """Build the pandas-ai OpenAI LLM (gpt-4o) used by the dataframe agent.

    Raises:
        KeyError: if OPENAI_API_KEY is not set in the environment.
    """
    # Fail fast with a clear error if the key is missing (the original did a
    # no-op self-assignment that only raised KeyError as a side effect).
    if 'OPENAI_API_KEY' not in os.environ:
        raise KeyError('OPENAI_API_KEY')
    llm = OpenAI(model="gpt-4o-2024-05-13")
    return llm
|
|
|
@cl.step(type="tool")
async def File(csv_path: str = "./public/ExpeCFA_LP_CAA.csv"):
    """Load the survey CSV into a pandas-ai SmartDataframe backed by the OpenAI LLM.

    Args:
        csv_path: path to the CSV file to load (defaults to the bundled
            ExpeCFA_LP_CAA survey export, preserving the original behavior).

    Returns:
        A SmartDataframe ready for natural-language `.chat()` queries.
    """
    llm = await LLM()
    df = SmartDataframe(csv_path, config={"llm": llm})
    return df
|
|
|
@cl.set_chat_profiles
async def chat_profile():
    """Expose the single chat profile for the survey-analysis assistant."""
    survey_profile = cl.ChatProfile(
        name="Traitement des données d'enquête : «Expé CFA : questionnaire auprès des professionnels de la branche de l'agencement»",
        markdown_description="Vidéo exploratoire autour de l'événement",
        icon="/public/logo-ofipe.png",
    )
    return [survey_profile]
|
|
|
@cl.set_starters
async def set_starters():
    """Provide the suggested starter question shown on the chat landing page."""
    starter = cl.Starter(
        label="Répartition du nombre de CAA dans les entreprises",
        message="Quel est le nombre de chargé.e d'affaires en agencement dans chaque type d'entreprises?",
        icon="/public/request-theme.svg",
    )
    return [starter]
|
|
|
@cl.on_message
async def on_message(message: cl.Message):
    """Answer an incoming chat message by querying the survey dataframe.

    Sends a banner, asks the SmartDataframe agent the user's question, then
    replies with the answer translated to French.
    """
    # Banner echoed before every answer (plain string: the f-prefix was
    # pointless with no placeholders).
    await cl.Message("> SURVEYIA").send()
    df = await File()
    # Natural-language query against the CSV via pandas-ai.
    res = df.chat(message.content)
    # The agent may answer in English; auto-detect and translate to French.
    await cl.Message(content=GoogleTranslator(source='auto', target='fr').translate(res)).send()