# Sydney-AI: a Gradio chat app wrapping the OpenAI "gpt-3.5-turbo" API.
# (Header lines of web-viewer residue — commit hashes, file size, line numbers —
# removed from the scraped copy.)
import csv
import io
import json
import os

import gradio as gr
import openai
import requests
openai.api_key = os.environ.get("OPENAI_API_KEY")
openai.api_base="https://api.chatanywhere.tech/v1"
# OPENAI_API_BASE="https://api.chatanywhere.tech/v1"
prompt_templates = {"Default ChatGPT": ""}
def get_empty_state():
return {"total_tokens": 0, "messages": []}
def download_prompt_templates():
url = "https://raw.githubusercontent.com/JunchuanYu/Sydney/main/prompts.csv"
response = requests.get(url)
for line in response.text.splitlines()[1:]:
act, prompt = line.split('","')
prompt_templates[act.replace('"', '')] = prompt.replace('"', '')
choices = list(prompt_templates.keys())
return gr.update(value=choices[0], choices=choices)
def on_token_change(user_token):
openai.api_key = user_token or os.environ.get("OPENAI_API_KEY")
def on_prompt_template_change(prompt_template):
if not isinstance(prompt_template, str): return
return prompt_templates[prompt_template]
def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state):
history = state['messages']
if not prompt:
return gr.update(value='', visible=state['total_tokens'] < 1_000), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']} / 3000", state
prompt_template = prompt_templates[prompt_template]
system_prompt = []
if prompt_template:
system_prompt = [{ "role": "system", "content": prompt_template }]
prompt_msg = { "role": "user", "content": prompt }
try:
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
history.append(prompt_msg)
history.append(completion.choices[0].message.to_dict())
state['total_tokens'] += completion['usage']['total_tokens']
except Exception as e:
history.append(prompt_msg)
history.append({
"role": "system",
"content": f"Error: {e}"
})
total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} / 3000" if not user_token else ""
chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
input_visibility = user_token or state['total_tokens'] < 3000
return gr.update(value='', visible=input_visibility), chat_messages, total_tokens_used_msg, state
def clear_conversation():
return gr.update(value=None, visible=True), None, "", get_empty_state()
css = """
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#chatbox {min-height: 400px;}
#header {text-align: center;}
#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
#total_tokens_str {text-align: right; font-size: 0.8em; color: #666; height: 1em;}
#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
.message { font-size: 1.2em; }
"""
with gr.Blocks(css=css) as demo:
state = gr.State(get_empty_state())
with gr.Column(elem_id="col-container"):
# gr.Markdown("""
# # Sydney-AI <b>
# <p align="left"> This app is an intelligent online chat app developed based on the newly released OpenAI API "gpt-3.5-turbo". The app's operating costs are sponsored by "45度科研人". Currently, the tokens is limited to 3000. If you want to remove this restriction, you can input your own OpenAI API key.<p>
# <p align="left"> The default model role of the app is the original assistant of ChatGPT, but you can also choose from the provided roles. <p>
# <p align="left"> Two adjustable parameters are provided to optimize the model: temperature, where a larger value leads to more creative replies, and max tokens, where a larger value allows the model to reply with more content. <p>
# """)
gr.Markdown("""# <div align=center> Sydney-AI </div>""")
with gr.Row():
with gr.Column():
chatbot = gr.Chatbot(elem_id="chatbox").style(color_map=("blue", "green"))
input_message = gr.Textbox(show_label=False, placeholder="Enter text and press submit", visible=True).style(container=False)
btn_submit = gr.Button("Submit")
total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
btn_clear_conversation = gr.Button("Restart Conversation")
with gr.Column():
# gr.Markdown("Enter your own OpenAI API Key to remove the 3000 token limit. You can get it follow this instruction [here](https://blog.pangao.vip/%E8%B6%85%E8%AF%A6%E7%BB%86%E6%B3%A8%E5%86%8COpenAI%E6%8E%A5%E5%8F%A3%E8%B4%A6%E5%8F%B7%E7%9A%84%E6%95%99%E7%A8%8B/).", elem_id="label")
user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False)
prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", choices=list(prompt_templates.keys()))
prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
with gr.Accordion("Advanced parameters", open=False):
temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, interactive=True, label="Temperature (higher = more creative/chaotic)")
max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response")
# gr.Markdown("",elem_id="header")
gr.Markdown("""
you can follow the WeChat public account [45度科研人] and leave me a message!
<div align=center><img width = '200' height ='200' src ="https://dunazo.oss-cn-beijing.aliyuncs.com/blog/wechat-simple.png"/></div>""", elem_id="header")
btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
user_token.change(on_token_change, inputs=[user_token], outputs=[])
demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])
demo.launch(debug=True, height='800px') |