import os
import logging
from getpass import getpass
from operator import itemgetter

import chainlit as cl
import openai
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# Enable debug logging to get more detailed error messages.
logging.basicConfig(level=logging.DEBUG)
# Set the OpenAI key. Never hardcode a live key in source; export
# OPENAI_API_KEY in your shell, or enter it at the prompt below.
if "OPENAI_API_KEY" not in os.environ:
    os.environ["OPENAI_API_KEY"] = getpass("OpenAI API key: ")
openai.api_key = os.environ["OPENAI_API_KEY"]
template = """ | |
Please act as a professional and polite Osio Labs' HR assistant for "OSIO LABS" company. | |
You are responsible to answer the questions provided based on "Osio Labs" company's information. | |
You are to reply in this format: "Provide a detailed explanation of the answer based on the context provided without | |
omitting any important information. | |
Please also include the github LINK provided in the data". | |
"For example: Mention one of the owners of Osio Lab" | |
"Response: Jeff Robbins, who holds the position of President" | |
If you don't know the question being asked, please say "I don't understand your question, | |
can you rephrase it please?" | |
Context: | |
{context} | |
Question: | |
{question} | |
""" | |
# Load documents
loader = CSVLoader("data/OsioLabs_Data.csv", encoding="cp1252")
documents = loader.load()
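
# Optional sanity check -- a minimal sketch, assuming the CSV path and encoding
# above match your data file:
print(f"Loaded {len(documents)} rows from the CSV")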
# Vector DB: split the documents into chunks, embed them, and index them in Chroma.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=250)
docs = text_splitter.split_documents(documents)
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())

prompt = ChatPromptTemplate.from_template(template)
retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
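
# Optional retrieval sanity check -- a hedged sketch; the query string is only
# illustrative. With k=2 the retriever should return the two closest chunks:
# for doc in retriever.get_relevant_documents("Who are the owners of Osio Labs?"):
#     print(doc.page_content[:100])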
# Initialize the OpenAI chat model
primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0.5)
retrieval_augmented_qa_chain = (
    # INVOKE CHAIN WITH: {"question": "<<SOME USER QUESTION>>"}
    # "question": taken from the "question" key of the input
    # "context": the "question" value piped through the retriever
    {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
    # Pass the retrieved "context" through unchanged so the next step can use it
    | RunnablePassthrough.assign(context=itemgetter("context"))
    # "response": the "context" and "question" values format the prompt, which is
    # piped into the LLM; the retrieved "context" is also passed along
    | {"response": prompt | primary_qa_llm, "context": itemgetter("context")}
)
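
# Example invocation outside Chainlit -- a minimal sketch; the question is only
# illustrative:
# result = retrieval_augmented_qa_chain.invoke({"question": "Who is the president of Osio Labs?"})
# print(result["response"].content)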
# Set up the Chainlit UI
@cl.on_chat_start
def start_chat():
    settings = {
        "model": "gpt-4",
        "temperature": 0,
        # ... other settings
    }
    cl.user_session.set("settings", settings)
@cl.on_message
async def main(message: cl.Message):
    # Answer the user's question with the retrieval_augmented_qa_chain
    try:
        # Extract the content from the message object
        user_query = message.content if hasattr(message, "content") else str(message)
        print(f"Received message: {user_query}")

        # Run the chain (invoke is synchronous, so no await here)
        chain_input = {"question": user_query}
        chain_output = retrieval_augmented_qa_chain.invoke(chain_input)

        if chain_output and "response" in chain_output:
            response_content = chain_output["response"].content
        else:
            response_content = "No response generated."
    except Exception as e:
        response_content = f"Error occurred: {str(e)}"
        print(f"Error: {str(e)}")  # Debugging

    # Send the response back to the user
    await cl.Message(content=response_content).send()
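
# Run the app with the Chainlit CLI (the filename `app.py` is a placeholder for
# this script's actual name; -w enables auto-reload during development):
#   chainlit run app.py -w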