# NOTE: removed non-source residue scraped from the hosting page
# (status badges, git blame hashes, and a line-number gutter).
import json
import os
from pathlib import Path
import add_qwen_libs # NOQA
import gradio as gr
import jsonlines
from schema import GlobalConfig
from qwen_agent.actions import RetrievalQA
from qwen_agent.llm import get_chat_model
from qwen_agent.log import logger
from qwen_agent.memory import Memory
# Read config.
# Fix: all text files are opened with an explicit UTF-8 encoding; the
# previous calls used the platform default, which breaks on Windows for
# the emoji/CSS/JS assets read below.
with open(Path(__file__).resolve().parent / 'server_config.json', 'r',
          encoding='utf-8') as f:
    server_config = json.load(f)
server_config = GlobalConfig(**server_config)

# Shared LLM client and retrieval memory used by all request handlers.
llm = get_chat_model(model=server_config.server.llm,
                     api_key=server_config.server.api_key,
                     model_server=server_config.server.model_server)
mem = Memory(llm=llm, stream=False)

# browse.jsonl: one record per cached page (url, raw text, session).
# popup_url.jsonl: append-only log of pages the user has opened.
cache_file = os.path.join(server_config.path.cache_root, 'browse.jsonl')
cache_file_popup_url = os.path.join(server_config.path.cache_root,
                                    'popup_url.jsonl')

# History of visited page URLs; the last element is the active page.
PAGE_URL = []

with open(Path(__file__).resolve().parent / 'css/main.css', 'r',
          encoding='utf-8') as f:
    css = f.read()
with open(Path(__file__).resolve().parent / 'js/main.js', 'r',
          encoding='utf-8') as f:
    js = f.read()
def check_access_token(access_token):
    """Return True when a non-empty access token is present.

    On a falsy token, show a Gradio info toast and return False so the
    calling handler can bail out early.
    """
    if access_token:
        return True
    gr.Info('The access token is not valid, please reset!')
    return False
def initialize(request: gr.Request):
    """Session start-up hook for ``demo.load``.

    Refreshes the current page URL, then validates the ``access_token``
    query parameter against the ACCESS_TOKEN environment variable.
    Returns the token on success, '' (with an info toast) otherwise.
    """
    set_page_url()
    # print("Request headers dictionary:", request.headers)
    query = dict(request.query_params)
    token = query.get("access_token")
    # print("Query parameters:", dict(request.query_params))
    if token and os.getenv("ACCESS_TOKEN") == token:
        return token
    gr.Info('The access token is not valid, please reset!')
    return ''
def add_text(history, text, access_token):
    """Append the user's message (bot reply pending) to the chat history.

    Returns the new history plus an update that clears and disables the
    input box; returns None when the access token is invalid.
    """
    if not check_access_token(access_token):
        return
    new_history = list(history) + [(text, None)]
    return new_history, gr.update(value='', interactive=False)
def rm_text(history, access_token):
    """Prepare a regenerate: strip the last bot reply so it can be redone.

    Returns (history, textbox update); the textbox is cleared and
    disabled while the bot re-answers.

    Bug fix: the empty-history branch previously fell through and
    implicitly returned None, which does not match the two outputs
    ([chatbot, txt]) this handler is bound to in the UI; it now returns
    the (unchanged) history and the textbox update like every other
    branch.
    """
    if not check_access_token(access_token):
        return
    if not history:
        gr.Warning('No input content!')
        return history, gr.update(value='', interactive=False)
    if not history[-1][1]:
        # The last bot reply is already empty — nothing to strip.
        return history, gr.update(value='', interactive=False)
    # Keep the last user message but drop its bot reply.
    history = history[:-1] + [(history[-1][0], None)]
    return history, gr.update(value='', interactive=False)
def add_file(history, file):
    """Append an uploaded file to the history as a ((name,), None) turn.

    Gradio's Chatbot renders a one-tuple message as a file attachment.
    """
    return history + [((file.name,), None)]
def set_page_url():
    """Record the most recently opened page URL in PAGE_URL.

    Reads the popup-URL cache (a JSONL file, one record per page open)
    and appends the last record's 'url' to PAGE_URL.

    Raises FileNotFoundError if the cache file does not exist yet.
    Fix: this was previously an ``assert``, which is silently stripped
    under ``python -O`` and would turn the failure into an opaque
    jsonlines error.
    """
    if not os.path.exists(cache_file_popup_url):
        raise FileNotFoundError(
            f'popup url cache not found: {cache_file_popup_url}')
    lines = []
    for line in jsonlines.open(cache_file_popup_url):
        lines.append(line)
    PAGE_URL.append(lines[-1]['url'])
    logger.info('The current access page is: ' + PAGE_URL[-1])
def bot(history):
    """Streaming Gradio handler: answer the last user turn.

    Looks up the current page's cached record, retrieves reference
    chunks from it, and streams the RetrievalQA answer into
    ``history[-1][1]``, yielding the updated history after each chunk.
    Finally persists the whole session back into the page's cache
    record.  NOTE(review): indentation was lost in extraction; the
    structure below is the reconstruction that matches the toast/answer
    semantics — confirm against upstream.
    """
    set_page_url()  # refresh PAGE_URL with the page being viewed now
    if not history:
        yield history
    else:
        now_page = None  # cached record for the current page, if any
        _ref = ''  # reference text fed to the QA agent (may stay empty)
        if not os.path.exists(cache_file):
            gr.Info("Please add this page to ChatGPT's Reading List first!")
        else:
            # Last matching record wins if the URL appears more than once.
            for line in jsonlines.open(cache_file):
                if line['url'] == PAGE_URL[-1]:
                    now_page = line
            if not now_page:
                gr.Info(
                    "This page has not yet been added to the ChatGPT's reading list!"
                )
            elif not now_page['raw']:
                # Record exists but content extraction hasn't finished yet.
                gr.Info('Please reopen later, ChatGPT is analyzing this page...')
            else:
                # Retrieve the chunks most relevant to the latest question.
                _ref_list = mem.get(
                    history[-1][0], [now_page],
                    max_token=server_config.server.max_ref_token)
                if _ref_list:
                    _ref = '\n'.join(
                        json.dumps(x, ensure_ascii=False) for x in _ref_list)
                else:
                    _ref = ''
        # TODO: considering history for retrieval qa
        agent = RetrievalQA(stream=True, llm=llm)
        history[-1][1] = ''
        response = agent.run(user_request=history[-1][0], ref_doc=_ref)
        # Stream chunks into the last bot message as they arrive.
        for chunk in response:
            if chunk is not None:
                history[-1][1] += chunk
                yield history
        # save history
        if now_page:
            # Rewrite the cache file: keep every other page's record
            # as-is and replace this page's record with the new session.
            now_page['session'] = history
            lines = []
            for line in jsonlines.open(cache_file):
                if line['url'] != PAGE_URL[-1]:
                    lines.append(line)
            lines.append(now_page)
            with jsonlines.open(cache_file, mode='w') as writer:
                for new_line in lines:
                    writer.write(new_line)
def load_history_session(history, access_token):
    """Restore the saved chat session for the current page.

    Returns the stored 'session' list from the page's cache record, or
    [] (with an info toast) when the cache/record is missing or the page
    is still being analyzed; returns None on an invalid access token.
    """
    if not check_access_token(access_token):
        return
    if not os.path.exists(cache_file):
        gr.Info("Please add this page to ChatGPT's Reading List first!")
        return []
    current = None
    for record in jsonlines.open(cache_file):
        if record['url'] == PAGE_URL[-1]:
            current = record
    if not current:
        gr.Info("Please add this page to ChatGPT's Reading List first!")
        return []
    if not current['raw']:
        gr.Info('Please wait, ChatGPT is analyzing this page...')
        return []
    return current['session']
def clear_session(access_token):
    """Erase the saved session for the current page.

    Rewrites the cache file with the matching record's 'session' set to
    []; always returns None so the bound Chatbot component is cleared.
    """
    if not check_access_token(access_token):
        return
    if not os.path.exists(cache_file):
        return None
    target = None
    kept = []
    for record in jsonlines.open(cache_file):
        if record['url'] == PAGE_URL[-1]:
            target = record
        else:
            kept.append(record)
    if not target:
        return None
    target['session'] = []
    kept.append(target)
    with jsonlines.open(cache_file, mode='w') as writer:
        for record in kept:
            writer.write(record)
    return None
# UI layout and event wiring.  NOTE(review): indentation was lost in
# extraction; this nesting is the reconstruction implied by Gradio's
# Blocks/Row/Column context-manager semantics — confirm against upstream.
with gr.Blocks(css=css, theme='soft') as demo:
    # Per-session token, populated by `initialize` on page load.
    access_token = gr.State("")
    chatbot = gr.Chatbot([],
                         elem_id='chatbot',
                         height=480,
                         avatar_images=(None, (os.path.join(
                             Path(__file__).resolve().parent,
                             'img/logo.png'))))
    with gr.Row():
        with gr.Column(scale=7):
            txt = gr.Textbox(show_label=False,
                             placeholder='Chat with ChatGPT...',
                             container=False)
        # with gr.Column(scale=0.06, min_width=0):
        #     smt_bt = gr.Button('β')
        # NOTE(review): the button labels below look like mojibake of
        # emoji (broom/no-entry/repeat) — kept byte-identical; confirm
        # and re-encode the file as UTF-8 at the source.
        with gr.Column(scale=1, min_width=0):
            clr_bt = gr.Button('π§Ή', elem_classes='bt_small_font')
        with gr.Column(scale=1, min_width=0):
            stop_bt = gr.Button('π«', elem_classes='bt_small_font')
        with gr.Column(scale=1, min_width=0):
            re_bt = gr.Button('π', elem_classes='bt_small_font')

    # Submit: append the user turn, stream the bot reply, then re-enable
    # the textbox (it is disabled by add_text while generating).
    txt_msg = txt.submit(add_text, [chatbot, txt, access_token], [chatbot, txt],
                         queue=False).then(bot, chatbot, chatbot)
    txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
    # txt_msg_bt = smt_bt.click(add_text, [chatbot, txt], [chatbot, txt],
    #                           queue=False).then(bot, chatbot, chatbot)
    # txt_msg_bt.then(lambda: gr.update(interactive=True),
    #                 None, [txt],
    #                 queue=False)
    # Clear: wipe the saved session and reset the chatbot.
    clr_bt.click(clear_session, [access_token], chatbot, queue=False)
    # Regenerate: strip the last bot reply and answer again.
    re_txt_msg = re_bt.click(rm_text, [chatbot, access_token], [chatbot, txt],
                             queue=False).then(bot, chatbot, chatbot)
    re_txt_msg.then(lambda: gr.update(interactive=True),
                    None, [txt],
                    queue=False)
    # Stop: cancel any in-flight generation from submit or regenerate.
    stop_bt.click(None, None, None, cancels=[txt_msg, re_txt_msg], queue=False)
    # On page load: validate the token, then restore the saved session.
    demo.load(initialize, [], [access_token]).then(
        load_history_session, [chatbot, access_token], chatbot)

demo.queue()
# demo.queue().launch(server_name=server_config.server.server_host, server_port=server_config.server.app_in_browser_port)