from .input_processing import parse_app_request, initialize_conversation, parse_prompt
from .response_generation import generate_sim
from .response_processing import process_model_response
from streamlit.logger import get_logger

# Module-level logger routed through Streamlit's logging configuration.
# NOTE(review): `logger` is not referenced in this chunk — presumably used
# elsewhere in the file or kept for future instrumentation; verify.
logger = get_logger(__name__)

def process_app_request(app_request: dict, endpoint_url: str, bearer_token: str) -> dict:
    """Process the app request and return the response in the required format.

    Pipeline: parse the request, seed the conversation, append the prompt
    messages, generate the simulated reply, post-process it, and wrap it in
    the envelope the APP expects.
    """

    # --- Input processing --------------------------------------------------
    # Split the incoming request into the model payload, the raw prompt text,
    # and the conversation identifier.
    model_input, prompt, conversation_id = parse_app_request(app_request)

    # Seed the conversation history (adds the system message).
    model_input = initialize_conversation(model_input, conversation_id)

    # Convert the prompt into structured messages and append them to the
    # conversation history carried inside the model payload.
    model_input['messages'].extend(parse_prompt(prompt))

    # --- Output generation & processing ------------------------------------
    # Ask the model for the assistant's (texter's) reply.
    completion = generate_sim(model_input, endpoint_url, bearer_token)

    # Post-process the raw completion (parse, guardrails, split).
    final_response = process_model_response(completion, model_input, endpoint_url, bearer_token)

    # Wrap the reply in the response shape the APP expects.
    return {"predictions": [{"generated_text": final_response}]}