import openai
import gradio as gr
import json
import os

# Read the API key from the environment instead of hardcoding it in the source
openai.api_key = os.getenv("OPENAI_API_KEY")
def save_conversation():
    with open('conversation.json', 'w') as f:
        json.dump(messages, f)

def load_conversation():
    try:
        with open('conversation.json', 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return []

messages = load_conversation()
if not messages:
    messages.append({"role": "system", "content": "You are a knowledgeable assistant specialized in providing assistance regarding tenancy rights and regulation in New South Wales, Australia."})
def CustomChatGPT(user_input):
    messages.append({"role": "user", "content": user_input})
    # Keep only the most recent messages; this slice limits the message count,
    # not the token count (a token-aware variant is sketched after this listing)
    conversation = messages[-4096:]
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=conversation,
            max_tokens=1000,
            temperature=0.7)
    except openai.error.OpenAIError as e:
        print(f"Received error from OpenAI: {e}")
        return "I'm sorry, but I'm unable to generate a response at this time."
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": ChatGPT_reply})
    save_conversation()
    return ChatGPT_reply
interface = gr.Interface(fn=CustomChatGPT,
                         inputs="textbox",
                         outputs="textbox",
                         title="HR HELPER",
                         description="Chat with an AI assistant that can answer questions about tenancy rights in New South Wales, Australia.")
interface.launch() | |
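
The slice in CustomChatGPT caps how many messages are kept, not how many tokens they contain. A minimal token-aware alternative is sketched below, assuming the tiktoken package is installed; the helper name, the 3000-token budget, and the simple per-message counting are illustrative assumptions, not part of the original Space.

import tiktoken

def truncate_to_token_budget(history, budget=3000, model="gpt-3.5-turbo"):
    # Illustrative helper: drop the oldest non-system messages until the
    # conversation fits an assumed token budget (per-message overhead is ignored).
    enc = tiktoken.encoding_for_model(model)

    def total_tokens(msgs):
        return sum(len(enc.encode(m["content"])) for m in msgs)

    msgs = list(history)
    while len(msgs) > 1 and total_tokens(msgs) > budget:
        del msgs[1]  # index 0 holds the system prompt, so keep it
    return msgs

Inside CustomChatGPT this could replace the message-count slice, e.g. conversation = truncate_to_token_budget(messages).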