# advanced-rag / openai_.py
# Provenance (Hugging Face Space page residue, preserved as comments so the
# file remains valid Python): uploaded by bstraehle, commit b724070 verified,
# "Update openai_.py", 2.06 kB.
import os

import openai
import pandas as pd
from IPython.display import display, HTML

from mongodb_ import vector_search
def get_embedding(text):
    """Embed *text* with OpenAI's ``text-embedding-3-small`` model.

    Returns the 1536-dimensional embedding vector, or ``None`` when the
    input is empty / not a string or when the API call fails.
    """
    # Guard clause: only non-empty strings are worth sending to the API.
    if not isinstance(text, str) or not text:
        return None
    try:
        response = openai.embeddings.create(
            input=text,
            model="text-embedding-3-small",
            dimensions=1536,
        )
        return response.data[0].embedding
    except Exception as exc:
        # Best-effort helper: report the failure and signal it with None
        # rather than propagating the exception to the caller.
        print(f"Error in get_embedding: {exc}")
        return None
def handle_user_prompt(openai_api_key, prompt, db, collection):
    """Answer *prompt* via RAG: vector-search the collection, then ask GPT.

    Parameters
    ----------
    openai_api_key : str
        OpenAI API key; installed globally on the ``openai`` module.
    prompt : str
        The user's question.
    db, collection
        Passed straight through to ``mongodb_.vector_search`` — presumably a
        MongoDB database/collection pair; confirm against that module.

    Returns
    -------
    str
        The model's answer, or ``"No results found."`` when the vector
        search produced no hits.
    """
    openai.api_key = openai_api_key

    # Assumes vector_search returns a list of dicts whose keys match the
    # SearchResultItem model (e.g. 'title' and 'plot') — TODO confirm.
    get_knowledge = vector_search(prompt, db, collection)

    # Bug fix: this branch used to return a 2-tuple while the success path
    # returns a plain string, so callers could not unpack consistently.
    # Both paths now return a single string.
    if not get_knowledge:
        return "No results found."

    # Validate/normalize each hit through the SearchResultItem model.
    # NOTE(review): SearchResultItem is not imported anywhere in this file —
    # verify it is injected by the calling notebook or another module.
    search_results_models = [
        SearchResultItem(**result)
        for result in get_knowledge
    ]

    # A DataFrame gives a compact tabular context string for the LLM and
    # renders nicely in the notebook below.
    search_results_df = pd.DataFrame([item.dict() for item in search_results_models])

    # Generate the system response grounded in the retrieved context.
    completion = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a airbnb listing recommendation system."},
            {
                "role": "user",
                "content": f"Answer this user prompt: {prompt} with the following context:\n{search_results_df}"
            }
        ]
    )
    system_response = completion.choices[0].message.content

    # Echo the exchange for notebook users, then show sources as a table.
    print(f"- User Question:\n{prompt}\n")
    print(f"- System Response:\n{system_response}\n")
    display(HTML(search_results_df.to_html()))

    return system_response