# Spaces: Sleeping (Hugging Face Space status banner — scrape artifact, not code)
# Standard library
import logging

# Third-party (LangChain)
from langchain.chains import ConversationChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

# Local application
from models.business_logic_utils.input_processing import initialize_conversation
from models.custom_parsers import CustomStringOutputParser
# Prompt scaffold wrapped around the persona template.  The single-braced
# {template} slot is filled once (see get_role_chain); the double-braced
# {{history}} / {{input}} slots survive that .format() call as literal
# {history} / {input}, so LangChain's PromptTemplate can fill them per turn.
OPENAI_TEMPLATE = """{template}
{{history}}
helper: {{input}}
texter:"""
def get_template_role_models(issue: str, language: str, texter_name: str = "") -> str:
    """Build the system-prompt text for a simulated texter persona.

    Args:
        issue: Crisis issue the simulated texter is experiencing.
        language: Language the conversation should be held in.
        texter_name: Optional name for the simulated texter.

    Returns:
        The ``content`` of the first message produced by
        ``initialize_conversation`` (presumably the system message —
        confirm against that helper).
    """
    model_input = {
        "issue": issue,
        "language": language,
        "texter_name": texter_name,
        "messages": [],
    }
    # Initialize the conversation (adds the system message)
    model_input = initialize_conversation(model_input, "")
    return model_input["messages"][0]["content"]
def get_role_chain(template, memory, temperature=0.8):
    """Assemble the LangChain conversation chain for the texter role-play.

    Args:
        template: Persona/system prompt text injected into ``OPENAI_TEMPLATE``.
        memory: LangChain memory object supplying the ``history`` variable.
        temperature: Sampling temperature for the chat model (default 0.8).

    Returns:
        Tuple of ``(ConversationChain, "helper:")`` — the string is
        presumably the speaker prefix callers strip/stop on; confirm at
        the call sites.
    """
    prompt = PromptTemplate(
        input_variables=["history", "input"],
        template=OPENAI_TEMPLATE.format(template=template),
    )
    llm = ChatOpenAI(
        model="gpt-4o",
        temperature=temperature,
        max_tokens=256,
    )
    chain = ConversationChain(
        llm=llm,
        prompt=prompt,
        memory=memory,
        output_parser=CustomStringOutputParser(),
        verbose=True,
    )
    # Plain string: original used an f-string with no placeholders (ruff F541).
    logging.debug("loaded GPT4o model")
    return chain, "helper:"