# pdf-to-table / app_langchain.py
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from src.utils import load_config_values
from src.dev_llm import FakeLLM
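# `FakeLLM` is a project-local stub (src/dev_llm.py). A minimal sketch of what
# it might look like -- an assumption, not the actual implementation -- built on
# LangChain's FakeListLLM, which replays canned responses:
#
#     from langchain_core.language_models import FakeListLLM
#
#     class FakeLLM(FakeListLLM):
#         def __init__(self, **kwargs):
#             # Hypothetical canned output for pipeline testing
#             super().__init__(
#                 responses=['{"appointment_date": "1st June 2024"}'], **kwargs
#             )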
# TODO: change this to the production model rather than the dev stand-ins
# Initialise fake values and a fake LLM to test out the full pipeline
tmp_llm = FakeLLM()
tmp_pdf_text = "This patient is due for an appointment on 1st June 2024"  # TODO: replace with a Runnable wired to a file uploader
# Load in model and pipeline configuration values
system_message, context_message, model_id = load_config_values(
    config_keys=[
        "system_message",
        "context_message",
        "model_id",
    ]
)
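# `load_config_values` is a project helper (src/utils.py); presumably it reads
# the requested keys from a local config file and returns them in the order
# given. For the chain below to work, `context_message` must be a template
# string referencing {system_message}, {pdf_text} and {data_to_extract},
# for example (illustrative only, not the project's actual config):
#
#     context_message: >
#       {system_message}
#       Extract {data_to_extract} from the following document:
#       {pdf_text}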
prompt = ChatPromptTemplate.from_template(
    template=context_message,
)
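# `from_template` builds a prompt with a single human message; its input
# variables are inferred from the {placeholders} in `context_message`
# (assumed here to be system_message, pdf_text and data_to_extract).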
chain = (
    {
        "system_message": lambda x: system_message,
        "pdf_text": lambda x: tmp_pdf_text,
        "data_to_extract": RunnablePassthrough(),
    }
    | prompt
    | tmp_llm
)
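# The leading dict is coerced by LCEL into a RunnableParallel: each key is
# computed from the chain's input, so the string passed to `invoke` flows
# through unchanged as `data_to_extract`, while the other two keys are filled
# from the fixed values above before the prompt is rendered and sent to the LLM.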
print(chain.invoke('{"appointment_date"}'))