import os

import openai
import transformers
import gradio as gr

# Set up the OpenAI API client (legacy openai<1.0 SDK). The key is read from the
# OPENAI_API_KEY environment variable instead of being hard-coded; it can also
# be pasted into the text box in the UI below.
openai.api_key = os.environ.get("OPENAI_API_KEY")

# Define the chat function for the OpenAI API (legacy Completion endpoint, openai<1.0)
def openai_chat(api_key, model, message):
    # Require an API key, either entered in the UI or set via OPENAI_API_KEY
    if not api_key and not openai.api_key:
        return "Please enter your OpenAI API key and try again."
    
    # Set up the OpenAI API request
    response = openai.Completion.create(
        engine=model,
        prompt=message,
        max_tokens=1024,
        n=1,
        stop=None,
        temperature=0.5,
        api_key=api_key,
    )
    
    # Extract the bot's response from the API request
    bot_response = response.choices[0].text.strip()
    
    return bot_response
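
# Note: openai.Completion.create with engine= requires the legacy openai<1.0 SDK.
# A rough, untested sketch of the equivalent call with the openai>=1.0 client
# (names as in the newer SDK; not used elsewhere in this file):
#
#   from openai import OpenAI
#   client = OpenAI(api_key=api_key)
#   response = client.completions.create(
#       model=model, prompt=message, max_tokens=1024, temperature=0.5
#   )
#   bot_response = response.choices[0].text.strip()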

# Define the chat function for Hugging Face API
def hf_chat(model_name, message):
    # Load the model and tokenizer (rebuilt on every call; see the cached-loader sketch below)
    model = transformers.pipeline("text-generation", model=model_name)
    
    # Generate a response from the model
    bot_response = model(message, max_length=1024, do_sample=True, temperature=0.7)[0]["generated_text"]
    
    return bot_response
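
# Optional sketch (hypothetical helper, not wired into hf_chat above): recreating
# the transformers pipeline for every message is slow, so a cached loader along
# these lines would keep one pipeline per model name in memory.
import functools


@functools.lru_cache(maxsize=4)
def get_pipeline(model_name):
    # Built once per model name, then reused from the cache on later calls
    return transformers.pipeline("text-generation", model=model_name)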

# Define the Gradio interface for chatbot
api_key_input = gr.inputs.Textbox(label="OpenAI API Key", default=None, block="sidebar")
model_input = gr.inputs.Dropdown(
    label="Select OpenAI model",
    choices=["davinci", "curie", "babbage"],
    default="davinci",
    block="sidebar"
)
hf_model_input = gr.inputs.Dropdown(
    label="Select Hugging Face model",
    choices=["microsoft/DialoGPT-large", "Salesforce/codegen-2B-multi", "microsoft/DialoGPT-small"],
    default="microsoft/DialoGPT-large",
    block="sidebar"
)
mode_input = gr.inputs.Dropdown(
    label="Select chatbot mode",
    choices=["OpenAI", "Hugging Face"],
    default="OpenAI",
    block="sidebar"
)
message_input = gr.inputs.Textbox(label="Enter your message here", block="input")
output = gr.outputs.Textbox(label="Bot response", block="output")

# Define the chat window
chat_window = []

def chatbot(chat_window, message, mode, model, hf_model, api_key, send_button, clear_button):
    if clear_button:
        chat_window.clear()
        return "Chat history cleared."
    if send_button:
        if message:
            if mode == "Hugging Face":
                bot_response = hf_chat(hf_model, message)
            else:
                bot_response = openai_chat(api_key, model, message)
            chat_window.append(("User", message))
            chat_window.append(("Bot", bot_response))
    return "\n".join([f"{name}: {text}" for name, text in chat_window])

# Define the Gradio interface for chatbot
send_button = gr.inputs.Button(label="Send")
clear_button = gr.inputs.Button(label="Clear Chat History")
chat_interface = gr.Interface(
    fn=chatbot,
    inputs=[
        message_input,
        mode_input,
        model_input,
        hf_model_input,
        api_key_input,
        send_button,
        clear_button
    ],
    outputs=output,
    title="Chatbot",
    description="Enter your message below to chat with an AI",
    theme="compact",
    allow_flagging=False,
    allow_screenshot=False,
    allow_share=False,
    layout="vertical"
)

# Launch the page
chat_interface.launch()
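
# Example of running this locally (assuming the file is saved as app.py, the
# conventional entry point for a Gradio Space):
#
#   OPENAI_API_KEY=sk-... python app.py
#
# launch() prints a local URL; open it, pick a backend and model, and press Send.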