import os
import logging
from operator import itemgetter

import chainlit as cl
import openai
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# Verbose logging to surface detailed error messages while debugging.
logging.basicConfig(level=logging.DEBUG)
# Read the OpenAI key from the environment; never hard-code a real API key in source.
openai.api_key = os.environ["OPENAI_API_KEY"]
template = """
Please act as a professional and polite HR assistant for "Osio Labs" company. You are responsible to answer the questions from documents provided.
You are to reply in this format: "Provide a detailed explanation of the answer based on the context provided without
omitting any important information including the github LINK provided in the data.
If you don't know the question being asked, please say I don't understand your question,
can you rephrase it please?
Context:
{context}
Question:
{question}
"""
# Load documents
loader = CSVLoader("data/OsioLabs_Data.csv", encoding="cp1252")
documents = loader.load()
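# Note: CSVLoader yields one Document per CSV row, so each row of
# OsioLabs_Data.csv becomes an individually retrievable record.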
# Vector DB
text_splitter = RecursiveCharacterTextSplitter(chunk_size=250)
docs = text_splitter.split_documents(documents)
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
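# Optional (a sketch, not wired into the app): Chroma can persist the index to
# disk so embeddings aren't recomputed on every restart; the directory name
# below is an illustrative assumption.
# vectorstore = Chroma.from_documents(
#     docs, OpenAIEmbeddings(), persist_directory="./chroma_db"  # hypothetical path
# )
# vectorstore.persist()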
prompt = ChatPromptTemplate.from_template(template)
retriever = vectorstore.as_retriever(search_kwargs={"k":2})
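# k=2 keeps only the two most similar chunks in the prompt; raise it if answers
# appear to be missing relevant context.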
# Initialize OpenAI Chat Model
primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0.5)
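# temperature=0.5 allows some phrasing variety; drop it to 0 for maximally
# deterministic answers.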
retrieval_augmented_qa_chain = (
    # Invoke the chain with: {"question": "<<SOME USER QUESTION>>"}
    # "question": the value of the input's "question" key
    # "context": the question piped into the retriever to fetch relevant chunks
    {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
    # Pass both keys through so "context" remains available to later steps.
    | RunnablePassthrough.assign(context=itemgetter("context"))
    # Format the prompt with "context" and "question", pipe it into the LLM, and
    # store the reply under "response"; carry "context" along for inspection.
    | {"response": prompt | primary_qa_llm, "context": itemgetter("context")}
)
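# Quick sanity check (a sketch to run manually, e.g. in a REPL, rather than at
# app startup; the question below is a made-up example):
# result = retrieval_augmented_qa_chain.invoke({"question": "What is the vacation policy?"})
# print(result["response"].content)   # the model's answer
# print(result["context"])            # the retrieved Documents that grounded it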
# Set up the Chainlit UI
@cl.on_chat_start
def start_chat():
    # Stored for reference only; the chain above configures its own model and temperature.
    settings = {
        "model": "gpt-4",
        "temperature": 0,
        # ... other settings
    }
    cl.user_session.set("settings", settings)
@cl.on_message
async def main(message: cl.Message):
    try:
        # Chainlit passes a Message object; fall back to str() for other inputs.
        user_query = message.content if hasattr(message, "content") else str(message)
        print(f"Received message: {user_query}")

        # Run the retrieval-augmented chain; .invoke() is synchronous, so no await.
        chain_input = {"question": user_query}
        chain_output = retrieval_augmented_qa_chain.invoke(chain_input)

        if chain_output and "response" in chain_output:
            # "response" is an AIMessage; its text lives in .content.
            response_content = chain_output["response"].content
        else:
            response_content = "No response generated."
    except Exception as e:
        response_content = f"Error occurred: {str(e)}"
        print(f"Error: {str(e)}")  # Debugging

    # Send the answer back to the user.
    await cl.Message(content=response_content).send()
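# Launch locally with the standard Chainlit entry point (assuming this file is
# saved as app.py); -w enables auto-reload on edits:
#
#   chainlit run app.py -w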
# cl.title("HR Assistant Chatbot")
# user_question = cl.text_input("Enter your question:")
# if user_question:
# response = main(user_question)
# cl.text_area("Response:", response, height=300)
# @cl.on_message
# async def main(message: str):
# # Here, adapt the logic to use your retrieval_augmented_qa_chain and ChatOpenAI model
# try:
# # Extract the content from the message object
# user_query = message.text if hasattr(message, 'text') else str(message)
# print(f"Received message: {user_query}")
# # Adapt this part to use your retrieval_augmented_qa_chain
# # For example:
# chain_input = {"question": user_query}
# chain_output = retrieval_augmented_qa_chain.invoke(chain_input) # Remove await
# if chain_output and "response" in chain_output:
# response_content = chain_output["response"]
# else:
# response_content = "No response generated."
# except Exception as e:
# response_content = f"Error occurred: {str(e)}"
# print(f"Error: {str(e)}") # Debugging
# # Send a response back to the user
# await cl.Message(content=response_content).send() |