"""Chainlit chat app: streams gpt-3.5-turbo completions back to the user.

Uses the pre-1.0 ``openai`` SDK (``openai.ChatCompletion.acreate``) and
chainlit's Prompt/PromptMessage wrappers so the playground can replay prompts.
"""

# Importing libraries
import os

import openai
import chainlit as cl
from chainlit.prompt import Prompt, PromptMessage
from chainlit.playground.providers import ChatOpenAI
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]

# Prompt templates
system_template = """
You are a helpful assistant who always speaks in a pleasant tone!
"""

user_template = """{input}
Think through your response step by step.
"""


# Runs once at the start of each user session
@cl.on_chat_start
async def start_chat():
    """Store the OpenAI chat-completion settings in the user session."""
    settings = {
        "model": "gpt-3.5-turbo",  # model used for every completion
        "temperature": 0,          # 0 = deterministic answers
        "max_tokens": 500,         # cap on the length of the *generated* reply
        "top_p": 1,                # nucleus sampling disabled at 1
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    cl.user_session.set("settings", settings)


# Runs each time the bot receives a user message
@cl.on_message
async def main(message: str):
    """Format the prompt, stream the LLM's answer, and attach the prompt.

    Args:
        message: the raw user input (chainlit passes the message text here).
    """
    settings = cl.user_session.get("settings")

    # Wrap the system/user templates so the chainlit playground can
    # re-run and edit this exact prompt later.
    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message),
            ),
        ],
        inputs={"input": message},
        settings=settings,
    )

    msg = cl.Message(content="")

    # Call OpenAI and stream tokens into the chainlit message as they arrive.
    async for stream_resp in await openai.ChatCompletion.acreate(
        messages=[m.to_openai() for m in prompt.messages],
        stream=True,
        **settings,
    ):
        token = stream_resp.choices[0]["delta"].get("content", "")
        await msg.stream_token(token)

    # Record the generated answer on the prompt, then attach the prompt to
    # the message so the playground shows the full context.
    prompt.completion = msg.content
    msg.prompt = prompt
    await msg.send()