import json
import os

import gradio as gr
import openai

# Read the API key from the environment rather than hard-coding a secret in the source.
openai.api_key = os.getenv("OPENAI_API_KEY")


def save_conversation():
    """Persist the running conversation so it survives restarts."""
    with open('conversation.json', 'w') as f:
        json.dump(messages, f)


def load_conversation():
    """Load a previously saved conversation, or start fresh if none exists."""
    try:
        with open('conversation.json', 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return []


messages = load_conversation()
if not messages:
    messages.append({
        "role": "system",
        "content": (
            "You are a super computer coding assistant, capable of solving any "
            "programming problem. You are equipped with the knowledge of various "
            "programming languages and tools, and can assist beginners in quickly "
            "creating incredibly powerful code."
        ),
    })


def CustomChatGPT(user_input):
    messages.append({"role": "user", "content": user_input})

    # Rough cap on how much history is sent with each request. Note that this
    # counts messages, not tokens, so it only approximates the model's
    # context-window limit; adjust the number as needed.
    conversation = messages[-20:]

    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=conversation,
            max_tokens=1000,
            temperature=0.7,
        )
    except openai.error.OpenAIError as e:
        # Any API failure (rate limit, connection error, invalid request, ...)
        # is reported and turned into a graceful fallback reply.
        print(f"Received error from OpenAI: {e}")
        return "I'm sorry, but I'm unable to generate a response at this time."

    ChatGPT_reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": ChatGPT_reply})
    save_conversation()
    return ChatGPT_reply


interface = gr.Interface(
    fn=CustomChatGPT,
    inputs="textbox",
    outputs="textbox",
    title="Coding Assistant AL",
    description=(
        "Coding Quantum Assistant, trained on all relevant languages as of "
        "June 2, 2023. Developed by A. Leschik."
    ),
)
interface.launch()
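
# Optional: the message-count slice inside CustomChatGPT is only a crude proxy
# for the model's real token limit. Below is a minimal sketch of a token-aware
# alternative. It assumes the optional `tiktoken` package is installed, the
# helper name `trim_to_token_budget` is hypothetical, and it is not wired into
# the app above; one could call it inside CustomChatGPT in place of the
# message-count slice.
def trim_to_token_budget(history, budget=3000, model="gpt-3.5-turbo"):
    """Drop the oldest non-system messages until the history fits the budget."""
    import tiktoken  # imported lazily so the script still runs without it
    enc = tiktoken.encoding_for_model(model)

    def total_tokens(msgs):
        # Counts content tokens only; per-message formatting overhead is ignored.
        return sum(len(enc.encode(m["content"])) for m in msgs)

    system, rest = history[:1], history[1:]
    while rest and total_tokens(system + rest) > budget:
        rest.pop(0)  # discard the oldest user/assistant turn first
    return system + rest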