import os
from dotenv import load_dotenv
import openai
import gradio as gr
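
# NOTE: this code targets the pre-1.0 openai Python SDK (openai.ChatCompletion)
# and the Gradio 3.x Blocks API (.style()); both interfaces changed in later releases.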

# Load environment variables from the .env file
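# (expected keys: openai_api_key and openai_organization_id)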
load_dotenv()

openai.api_key = os.getenv("openai_api_key")
openai.organization = os.getenv("openai_organization_id")


# Seed the conversation: a system prompt that scopes the bot to physics, plus an opening assistant greeting.
message_history = [{"role": "system", "content": "You are a physics assistant chatbot; refuse to answer anything unrelated to physics."},
                   {"role": "assistant", "content": "Hi, I am a physics assistant. I can help you with your physics questions."}]

def predict(user_input):
    """Append the user's message, query GPT-4, and return the chat history as (user, bot) pairs."""
    global message_history

    message_history.append({"role": "user", "content": user_input})

    completion = openai.ChatCompletion.create(
        model="gpt-4",
        messages=message_history
    )

    reply_content = completion.choices[0].message.content

    # Keep the reply only if it is on topic; otherwise substitute a polite refusal.
    if check_in_role(reply_content):
        message_history.append({"role": "assistant", "content": reply_content})
    else:
        message_history.append({"role": "assistant", "content": "I'm sorry, but the question you have asked seems to be unrelated to physics, so I'm not able to provide an answer. If you have any questions related to physics, I would be happy to try and assist you."})
    # Pair the history into (user, assistant) tuples, skipping the system prompt and the greeting,
    # which is the format the Chatbot component expects.
    response = [(message_history[i]["content"], message_history[i + 1]["content"]) for i in range(2, len(message_history) - 1, 2)]
    return response

def check_in_role(reply_content):
    """Ask GPT-4 whether the generated reply is physics-related; a simple yes/no topic guardrail."""
    p = "Is the following text related to physics? Answer using only 'yes' or 'no'.\n\n" + reply_content + "\n\n---\nLabel:"
    q = [{"role": "user", "content": p}]

    res = openai.ChatCompletion.create(
        model="gpt-4",
        messages=q
    )
    label = res.choices[0].message.content.lower()
    print(label)  # debug: log the classifier's yes/no verdict
    return "yes" in label



# Build the Gradio UI: a chat window above a text input and a Send button.
with gr.Blocks(theme=gr.themes.Soft(), title="Physics Assistant") as demo:

    with gr.Row():
        gr.Markdown("Get instant physics help with our chatbot! Ask any physics-related questions and receive accurate and reliable answers in seconds. Perfect for students, researchers, and anyone interested in the laws of the universe.")

    bot = gr.Chatbot().style(height=500)
    
    with gr.Row():
        with gr.Column(scale=0.85):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter a physics-related question",
            ).style(container=False)
        with gr.Column(scale=0.15, min_width=0):
            send = gr.Button("Send")

    # Wire the Send button: pass the textbox contents to predict and show the returned history in the chat window.
    send.click(predict, inputs=[txt], outputs=bot)

# Start a local Gradio server (http://127.0.0.1:7860 by default).
demo.launch()