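"""Inference endpoint handler for chat suggestions.

Accepts either a plain JSON payload or a Hugging Face-style payload wrapped in
an "inputs" key. Depending on the fields present, the handler either generates
conversation starters from profile questions ("FromUserKavasQuestions" plus
"Chatmood") or suggests replies to recent chat history ("LastChatMessages"),
using the OpenAI chat completion API.
"""
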
from pydantic import BaseModel
import openai
from environs import Env
from typing import List, Dict, Any
import requests
def download_env_file(url: str, local_path: str):
    response = requests.get(url)
    response.raise_for_status()  # Ensure we notice bad responses
    with open(local_path, 'wb') as f:
        f.write(response.content)

# Download the .env file
env_file_url = "https://www.dropbox.com/scl/fi/21ldek2cdsak2v3mhyy5x/openai.env?rlkey=nxdkd8l8esdy8npa3vfgvqkhp&st=s2f2zzwl&dl=1" # Adjusted URL for direct download
local_env_path = "openai.env"
download_env_file(env_file_url, local_env_path)
# Load environment variables
env = Env()
env.read_env("openai.env")
openai.api_key = env.str("OPENAI_API_KEY")
# Constants
MODEL = env.str("MODEL", "gpt-3.5-turbo")
AI_RESPONSE_TIMEOUT = env.int("AI_RESPONSE_TIMEOUT", 20)
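
# The downloaded openai.env is expected to provide at least OPENAI_API_KEY;
# MODEL and AI_RESPONSE_TIMEOUT may also be set there to override the defaults
# above, for example:
#   OPENAI_API_KEY=<your OpenAI API key>
#   MODEL=gpt-3.5-turbo
#   AI_RESPONSE_TIMEOUT=20
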
class EndpointHandler:
    def __init__(self, model_dir=None):
        self.model_dir = model_dir

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        try:
            if "inputs" in data:  # Check if data is in Hugging Face JSON format
                return self.process_hf_input(data)
            else:
                return self.process_json_input(data)
        except ValueError as e:
            return {"error": str(e)}
        except Exception as e:
            return {"error": str(e)}

    def process_json_input(self, json_data):
        # Profile questions plus mood -> conversation starter suggestions
        if "FromUserKavasQuestions" in json_data and "Chatmood" in json_data:
            prompt = self.create_conversation_starter_prompt(
                json_data["FromUserKavasQuestions"],
                json_data["Chatmood"]
            )
            starter_suggestion = self.generate_conversation_starters(prompt)
            return {"conversation_starter": starter_suggestion}
        # Recent chat history -> suggested replies (only the last four messages are used)
        elif "LastChatMessages" in json_data:
            last_chat_messages = json_data["LastChatMessages"][-4:]
            response = {
                "version": "1.0.0-alpha",
                "suggested_responses": self.get_conversation_suggestions(last_chat_messages)
            }
            return response
        else:
            raise ValueError("Invalid JSON structure.")

    def process_hf_input(self, hf_data):
        print("Received HF Data:", hf_data)  # Debugging line
        if "inputs" in hf_data:
            actual_data = hf_data["inputs"]
            print("Processing actual data:", actual_data)  # Debugging line
            return self.process_json_input(actual_data)
        else:
            return {"error": "Invalid Hugging Face JSON structure."}

    def create_conversation_starter_prompt(self, user_questions, chatmood):
        # Keep only answered questions and flatten them into "Question - Answer" pairs
        formatted_info = " ".join([f"{qa['Question']} - {qa['Answer']}" for qa in user_questions if qa['Answer']])
        prompt = (f"Based on user profile info and a {chatmood} mood, "
                  f"generate 3 subtle and very short conversation starters. "
                  f"Explore various topics like travel, hobbies, movies, and not just culinary tastes. "
                  f"\nProfile Info: {formatted_info}")
        return prompt

    def generate_conversation_starters(self, prompt):
        try:
            # Legacy (pre-1.0) openai SDK interface
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=[{"role": "system", "content": prompt}],
                temperature=0.7,
                max_tokens=100,
                n=1,
                request_timeout=AI_RESPONSE_TIMEOUT
            )
            return response.choices[0].message["content"]
        except openai.error.OpenAIError as e:
            raise Exception(f"OpenAI API error: {str(e)}")
        except Exception as e:
            raise Exception(f"Unexpected error: {str(e)}")

    def transform_messages(self, last_chat_messages):
        # Flatten each chat entry into a single line based on whichever key is present
        t_messages = []
        for chat in last_chat_messages:
            if "fromUser" in chat:
                from_user = chat['fromUser']
                message = chat.get('touser', '')
                t_messages.append(f"{from_user}: {message}")
            elif "touser" in chat:
                to_user = chat['touser']
                message = chat.get('fromUser', '')
                t_messages.append(f"{to_user}: {message}")
        # Prefix the most recent line with "Q:" when the latest chat entry carries a 'touser' key
        if t_messages and "touser" in last_chat_messages[-1]:
            latest_message = t_messages[-1]
            latest_message = f"Q: {latest_message}"
            t_messages[-1] = latest_message
        return t_messages

    def generate_system_prompt(self, last_chat_messages, fromusername, tousername, zodiansign=None, chatmood=None):
        prompt = ""
        if not last_chat_messages or ("touser" not in last_chat_messages[-1]):
            prompt = f"Suggest a casual and friendly message for {fromusername} to start a conversation with {tousername} or continue naturally, as if talking to a good friend. Strictly avoid replying to messages from {fromusername} or answering their questions."
        else:
            prompt = f"Suggest a warm and friendly reply for {fromusername} to respond to the last message from {tousername}, as if responding to a dear friend. Strictly avoid replying to messages from {fromusername} or answering their questions."
        if zodiansign:
            prompt += f" Keep in mind {tousername}'s {zodiansign} zodiac sign."
        # Tailor the tone of the instruction to the selected chat mood
        if chatmood:
            if chatmood == "Casual Vibes":
                prompt += " Keep the conversation relaxed and informal."
            elif chatmood == "Flirty Fun":
                prompt += " Add a playful and teasing tone to the conversation."
            elif chatmood == "Deep and Thoughtful":
                prompt += " Encourage reflective and introspective responses."
            elif chatmood == "Humor Central":
                prompt += " Incorporate witty and humorous elements into the conversation."
            elif chatmood == "Romantic Feels":
                prompt += " Express affection and use sweet and romantic language."
            elif chatmood == "Intellectual Banter":
                prompt += " Engage in thought-provoking discussions on topics like books and movies."
            elif chatmood == "Supportive Mode":
                prompt += " Offer empathy, support, and encouragement in the conversation."
            elif chatmood == "Curiosity Unleashed":
                prompt += " Show eagerness to learn and explore interests by asking questions."
            elif chatmood == "Chill and Easygoing":
                prompt += " Maintain a relaxed and laid-back tone in the conversation."
            elif chatmood == "Adventurous Spirit":
                prompt += " Share travel stories and plans with enthusiasm and energy."
        return prompt

    def get_conversation_suggestions(self, last_chat_messages):
        # Conversation metadata is read from the most recent chat entry
        fromusername = last_chat_messages[-1].get("fromusername", "")
        tousername = last_chat_messages[-1].get("tousername", "")
        zodiansign = last_chat_messages[-1].get("zodiansign", "")
        chatmood = last_chat_messages[-1].get("Chatmood", "")
        messages = self.transform_messages(last_chat_messages)
        system_prompt = self.generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign, chatmood)
        messages_final = [{"role": "system", "content": system_prompt}]
        if messages:
            messages_final.extend([{"role": "user", "content": m} for m in messages])
        else:
            # If there are no messages, add a default message to ensure a response is generated
            default_message = f"{tousername}: Hi there!"
            messages_final.append({"role": "user", "content": default_message})
        try:
            # Request three candidate replies in a single call
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=messages_final,
                temperature=0.7,
                max_tokens=150,
                n=3,
                request_timeout=AI_RESPONSE_TIMEOUT
            )
            formatted_replies = []
            for idx, choice in enumerate(response.choices):
                formatted_replies.append({
                    "type": "TEXT",
                    "body": choice.message['content'],
                    "title": f"AI Reply {idx + 1}",
                    "confidence": 1,
                })
            return formatted_replies
        except openai.error.Timeout:
            # Graceful fallback when the OpenAI request times out
            formatted_reply = [{
                "type": "TEXT",
                "body": "Request to the AI response generator has timed out. Please try again later.",
                "title": "AI Response Error",
                "confidence": 1
            }]
            return formatted_reply
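

# Minimal local smoke test (illustrative only): the sample questions below are
# hypothetical, and running this block sends a real request to the OpenAI API
# using the key loaded above.
if __name__ == "__main__":
    handler = EndpointHandler()
    sample_payload = {
        "inputs": {
            "FromUserKavasQuestions": [
                {"Question": "Favourite travel destination?", "Answer": "Japan"},
                {"Question": "Go-to weekend activity?", "Answer": "Hiking"},
            ],
            "Chatmood": "Casual Vibes",
        }
    }
    # Expected shape of the result: {"conversation_starter": "..."}
    print(handler(sample_payload))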