File size: 988 Bytes
ce61e92
 
9f5f200
123ba7e
 
65db96a
ce61e92
 
 
 
65db96a
123ba7e
ce61e92
 
 
 
 
 
 
 
 
 
 
 
 
9f5f200
ce61e92
 
 
 
 
 
 
 
9f5f200
 
ce61e92
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

from src.utils import load_config_values
from src.dev_llm import FakeLLM

# TODO: Swap these development stand-ins for the production model
# A fake LLM plus canned document text let us exercise the full pipeline
# without touching a real model or file upload.
tmp_llm = FakeLLM()
tmp_pdf_text = "This patient is due for an appointment on 1st June 2024" # replace with Runner to a file uploader

# Pull the prompt/model settings out of the pipeline configuration file
config_keys = [
    "system_message",
    "context_message",
    "model_id",
]
system_message, context_message, model_id = load_config_values(config_keys=config_keys)


# Build the chat prompt from the configured context template
prompt = ChatPromptTemplate.from_template(template=context_message)


def _constant_system_message(_):
    # Always supply the configured system message, ignoring the chain input
    return system_message


def _constant_pdf_text(_):
    # Always supply the stand-in document text, ignoring the chain input
    return tmp_pdf_text


# Wire the three prompt inputs into the template, then into the (fake) LLM.
# The value passed to .invoke() flows through "data_to_extract" untouched.
chain = (
    {
        "system_message": _constant_system_message,
        "pdf_text": _constant_pdf_text,
        "data_to_extract": RunnablePassthrough(),
    }
    | prompt
    | tmp_llm
)

print(chain.invoke("{\"appointment_date\"}"))