# routes/input_handler.py
from fastapi import APIRouter, status, HTTPException
from models.input import Input
from routes import search_products, purchase, order_management, account_management, customer_support
from services.nlp import recognize_intent, generate_response, transcribe
from services.utils import extract_order_id_from_query, generate_image_embedding, generate_text_embedding
router = APIRouter()
# Human-readable descriptions of the product-search handlers.
# NOTE(review): neither FUNCTION_DESCRIPTIONS dict is referenced in this file —
# presumably consumed elsewhere (e.g. by the NLP/function-calling layer); verify
# against callers before removing.
FUNCTION_DESCRIPTIONS_FOR_PRODUCTS = {
"search_products_by_keywords": "User wants to find products based on keywords",
"search_products_by_filters": "User wants to refine search results with filters",
"get_product_details": "User wants detailed information about a specific product"
}
# Human-readable descriptions of the order-management handlers.
FUNCTION_DESCRIPTIONS_FOR_ORDERS = {
"get_order_location": "Find the location (city or state) of a specific order using an identification number order",
"get_recent_order": "Track the most recent order of a customer",
"get_order_details": "Get details about a specific order using an identification number order",
"get_order_quantity": "Calculate the total number of products in a specific order",
"get_order_amount": "Calculate the total amount spent in a specific order",
"cancel_order": "Process order cancellation requests"
}
# File-extension allowlists used to classify uploaded attachments.
image_extensions = (".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff")
audio_extensions = (".mp3", ".wav", ".flac", ".ogg", ".aac", ".m4a")
def query_processing(input: Input):
if input.text:
intent_from_text = recognize_intent(input.text)
encoded_text = generate_text_embedding(input.text)
print(f'Intent_from_text: {intent_from_text}')
print(f'Text embedding: {encoded_text.shape}')
if input.files:
for file in input.files:
if file.endswith(audio_extensions):
text_from_audio = transcribe(file)
print(f'Transcription: {text_from_audio}')
# history_openai_format.append({"role": "user", "content": message})
elif file.endswith(image_extensions):
image_vector = generate_image_embedding(file)
print(f'Shape of the image vector: {image_vector.shape}')
return {"Similar products to the image attached": search_products.handle_search_products_by_keywords(image_vector)}
# history_openai_format.append({"role": "user", "content": "User has sent an image."})
else:
return "Please attach a valid file (image or audio)"
if intent_from_text == "search for products":
return {"products related to the search": search_products.handle_search_products_by_keywords(encoded_text)}
elif intent_from_text == "order management":
order_id = extract_order_id_from_query(input.text)
if order_id:
return order_management.handle_track_order(order_id)
else:
return "Please explain to the user that an order number should be provided in the chat interface"
else:
return None
@router.post("/")
async def handle_input(input: Input):
    """Chat entry point: gather retrieval context, then generate the reply."""
    # Structured context (products / order info) pulled for this query.
    context = query_processing(input)
    reply = generate_response(input, context)
    return {"generative response": reply}