# Hugging Face Space: persona-driven OpenAI chat coach served via Gradio.
# (Removed non-code page-header residue: "Spaces: / Sleeping / Sleeping".)
import os
import os.path
from datetime import datetime

import gradio as gr
from openai import OpenAI

################# Start PERSONA-SPECIFIC VALUES ######################
# Persona configuration is injected via environment variables so the same
# code base can serve multiple coach personas.
coach_code = os.getenv("COACH_CODE")          # short code used in the transcript file name
coach_name_short = os.getenv("COACH_NAME_SHORT")  # display name for the submit button
coach_name_upper = os.getenv("COACH_NAME_UPPER")  # upper-case name used as the transcript speaker label
sys_prompt_new = os.getenv("PROMPT_NEW")      # system prompt defining the persona's identity
theme = os.getenv("THEME")                    # Gradio theme name
################# End PERSONA-SPECIFIC VALUES ######################

################# Start OpenAI-SPECIFIC VALUES ######################
# Initialize OpenAI API client with API key
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# OpenAI model
openai_model = os.getenv("OPENAI_MODEL")
################# End OpenAI-SPECIFIC VALUES ######################

tx = os.getenv("TX")              # magic user input that dumps the saved transcript
prefix = os.getenv("PREFIX")      # "/data/" if in HF or "data/" if local
file_name = os.getenv("FILE_NAME")  # base transcript file name
############### CHAT ################### | |
def predict(user_input, history):
    """Gradio ChatInterface handler: chat with the persona via OpenAI.

    Parameters
    ----------
    user_input : str
        The user's latest message. If it equals the magic ``tx`` value,
        the saved transcript is returned instead of a chat reply.
    history : list[tuple[str, str]]
        Prior (user, assistant) message pairs supplied by Gradio.

    Returns
    -------
    str
        The assistant's reply, or the transcript text / a not-found
        message when the transcript command was given.
    """
    max_length = 2000
    transcript_file_path = f"{prefix}{coach_code}-{file_name}"

    # Transcript command: return the saved transcript instead of chatting.
    if user_input == tx:
        # FIX: the original guarded open() with os.path.exists() and also had
        # an `except FileNotFoundError` that could therefore never fire; a
        # missing file silently returned "". Now the "not found" message is
        # actually reachable.
        if os.path.exists(transcript_file_path):
            with open(transcript_file_path, "r", encoding="UTF-8") as file:
                return file.read()
        return "File '" + file_name + "' not found."
    elif len(user_input) > max_length:
        # Over-long input is blanked rather than rejected with an error.
        user_input = ""
        # raise gr.Error(f"Input is TOO LONG. Max length is {max_length} characters. Try again.")

    # Re-shape Gradio's history into the OpenAI chat-message format,
    # with the persona's system prompt first.
    history_openai_format = [
        {"role": "system", "content": "IDENTITY: " + sys_prompt_new}
    ]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": user_input})

    completion = client.chat.completions.create(
        model=openai_model,
        messages=history_openai_format,
        temperature=1.2,
        frequency_penalty=0.4,
        presence_penalty=0.1,
        stream=True
    )

    # FIX: message_content is initialized up front — the original only
    # assigned it inside the loop, so an empty stream raised NameError below.
    # FIX: dropped the `except StopAsyncIteration` handler — the stream is a
    # synchronous iterator, so that exception could never be raised here.
    message_content = ""
    for chunk in completion:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            message_content += delta

    # Append the latest exchange to the transcript file (open in append mode,
    # so only the new entry is written — not the whole transcript).
    entry = (
        "Date/Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
        + f"YOU: {user_input}\n\n"
        + f"{coach_name_upper}: {message_content}\n\n\n"
    )
    with open(transcript_file_path, "a", encoding="UTF-8") as file:
        file.write(entry)

    return message_content
# GUI: themed Blocks layout wrapping a ChatInterface; the CSS hides the
# Gradio footer. Retry/undo/clear buttons are disabled for a minimal UI.
with gr.Blocks(theme, css="footer {visibility: hidden}") as demo:
    gr.ChatInterface(
        predict,
        submit_btn="Chat with " + coach_name_short,
        retry_btn=None,
        undo_btn=None,
        clear_btn=None,
        autofocus=True,
    )

# show_api=False keeps the auto-generated API docs/endpoint listing hidden.
demo.launch(show_api=False)