# assistants-api / app.py
import codecs
import json
import time
import openai
import gradio as gr
import os
from dotenv import load_dotenv
# Load the OpenAI API key and Assistant ID from environment variables (.env)
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
assistant_id = os.getenv('OPENAI_ASSISTANT_ID')
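
# assistant_response() runs one full Assistants API round trip for a single
# prompt: retrieve the configured assistant, create a thread, post the user's
# message, start a run, poll until the run finishes, and return the newest
# assistant message from the thread.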
def assistant_response(prompt):
    client = openai.OpenAI()  # picks up OPENAI_API_KEY from the environment

    print("### Step 1: Retrieve the Assistant ###")
    assistant = client.beta.assistants.retrieve(assistant_id)
    print(assistant)

    print("### Step 2: Create a Thread ###")
    empty_thread = client.beta.threads.create()
    thread_id = empty_thread.id
    print(empty_thread)

    print("### Step 3: Retrieve the Thread ###")
    thread = client.beta.threads.retrieve(thread_id)
    print(thread)

    print("### Step 4: Add a Message to the Thread ###")
    thread_message = client.beta.threads.messages.create(
        thread_id,
        role="user",
        content=prompt,
    )
    message_id = thread_message.id
    print(thread_message)

    print("### Step 5: Retrieve the Message ###")
    message = client.beta.threads.messages.retrieve(
        message_id=message_id,
        thread_id=thread_id,
    )
    print(message)

    print("### Step 6: Run the Assistant ###")
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
    )

    print("### Step 7: Wait for the Run to Finish ###")

    def wait_on_run(run, thread):
        # Poll until the run leaves the "queued" / "in_progress" states.
        while run.status in ("queued", "in_progress"):
            run = client.beta.threads.runs.retrieve(
                thread_id=thread.id,
                run_id=run.id,
            )
            time.sleep(0.5)
        return run

    run = wait_on_run(run, thread)
    print(run)

    print("### Step 8: Retrieve the Assistant's Response ###")
    messages = client.beta.threads.messages.list(
        thread_id=thread.id
    )
    # json.dumps escapes non-ASCII text; decoding with 'unicode-escape'
    # restores readable characters (e.g. Japanese) in the console log.
    messages_str = json.dumps(messages.dict(), indent=2)
    print(codecs.decode(messages_str, 'unicode-escape'))

    # Messages are returned newest first, so data[0] is the assistant's reply.
    answer = messages.data[0].content[0].text.value
    print(answer)
    return answer
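
# A minimal, commented-out sketch for a quick manual test without the Gradio UI.
# The prompt below is only an illustrative placeholder; .env must still provide
# OPENAI_API_KEY and OPENAI_ASSISTANT_ID.
# if __name__ == "__main__":
#     print(assistant_response("What can you help me with?"))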

# Set up the Gradio interface
iface = gr.Interface(
    fn=assistant_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text"
)

# Launch the application
iface.launch()
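
# launch() also accepts optional keyword arguments such as server_name="0.0.0.0"
# (listen on all interfaces) or share=True (temporary public link); for example:
# iface.launch(server_name="0.0.0.0", share=True)
# Neither is required for local use or a Hugging Face Space.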