import hashlib
import os
import re
import json
import threading
from hf_api import restart_space

import gradio as gr

from chat_template import ChatTemplate
from llama_cpp_python_streamingllm import StreamingLLM


lock = threading.Lock()
session_active = False

custom_css = r'''
#area > div {
    height: 100%;
}
#RAG-area {
    flex-grow: 1;
}
#RAG-area > label {
    height: 100%;
    display: flex;
    flex-direction: column;
}
#RAG-area > label > textarea {
    flex-grow: 1;
    max-height: 20vh;
}
#VO-area {
    flex-grow: 1;
}
#VO-area > label {
    height: 100%;
    display: flex;
    flex-direction: column;
}
#VO-area > label > textarea {
    flex-grow: 1;
    max-height: 20vh;
}
#prompt > label > textarea {
    max-height: 63px;
}
'''


def text_format(text: str, _env=None, **env):
    """Replace each ``{{key}}`` placeholder in ``text`` with its value.

    Values come from the optional dict ``_env`` and from keyword arguments.
    ``_env`` entries are substituted first, so for duplicate keys the
    ``_env`` value wins (the placeholder is already gone by the time the
    keyword arguments are applied).
    """
    if _env is not None:
        for k, v in _env.items():
            text = text.replace(r'{{' + k + r'}}', v)
    for k, v in env.items():
        text = text.replace(r'{{' + k + r'}}', v)
    return text
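
# Illustrative example (not executed here):
#     text_format('Hi, {{char}}! I am {{user}}.', char='Alice', user='Bob')
# returns 'Hi, Alice! I am Bob.'.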


def x_hash(x: str):
    return hashlib.sha1(x.encode('utf-8')).hexdigest()


# Overlay the user config (rp_config.json) on the sample config. The cache
# file name gets a hash of the user config appended, so editing the config
# switches to a fresh KV-cache file (assuming rp_config.json itself does
# not override setting_cache_path).
with open('rp_config.json', encoding='utf-8') as f:
    tmp = f.read()
with open('rp_sample_config.json', encoding='utf-8') as f:
    cfg = json.load(f)
cfg['setting_cache_path']['value'] += x_hash(tmp)
cfg.update(json.loads(tmp))
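
# For example (hypothetical values): if the sample config sets the cache
# path to 'cache/session' and x_hash(tmp) is 'd3b07384...', sessions are
# cached at 'cache/sessiond3b07384...'.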


# Dialogue wrapped in curly quotes “…” is bolded for the chat display.
reg_q = re.compile(r'“(.+?)”')


def chat_display_format(text: str):
    return reg_q.sub(r' **\g<0>** ', text)
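
# Illustrative example (not executed here):
#     chat_display_format('她说“你好”。')  ->  '她说 **“你好”** 。'
# The Chatbot component renders the markdown, so quoted speech appears bold.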


with gr.Blocks() as setting:
    with gr.Row():
        setting_path = gr.Textbox(label="Model path", max_lines=1, scale=2, **cfg['setting_path'])
        setting_cache_path = gr.Textbox(label="Cache path", max_lines=1, scale=2, **cfg['setting_cache_path'])
        setting_seed = gr.Number(label="Random seed", scale=1, **cfg['setting_seed'])
        setting_n_gpu_layers = gr.Number(label="n_gpu_layers", scale=1, **cfg['setting_n_gpu_layers'])
    with gr.Row():
        setting_ctx = gr.Number(label="Context size (tokens)", **cfg['setting_ctx'])
        setting_max_tokens = gr.Number(label="Max response length (tokens)", interactive=True, **cfg['setting_max_tokens'])
        setting_n_keep = gr.Number(value=10, label="n_keep", interactive=False)
        setting_n_discard = gr.Number(label="n_discard", interactive=True, **cfg['setting_n_discard'])
    with gr.Row():
        setting_temperature = gr.Number(label="Temperature", interactive=True, **cfg['setting_temperature'])
        setting_repeat_penalty = gr.Number(label="Repeat penalty", interactive=True, **cfg['setting_repeat_penalty'])
        setting_frequency_penalty = gr.Number(label="Frequency penalty", interactive=True, **cfg['setting_frequency_penalty'])
        setting_presence_penalty = gr.Number(label="Presence penalty", interactive=True, **cfg['setting_presence_penalty'])
        setting_repeat_last_n = gr.Number(label="Penalty window", interactive=True, **cfg['setting_repeat_last_n'])
    with gr.Row():
        setting_top_k = gr.Number(label="Top-K", interactive=True, **cfg['setting_top_k'])
        setting_top_p = gr.Number(label="Top-P", interactive=True, **cfg['setting_top_p'])
        setting_min_p = gr.Number(label="Min-P", interactive=True, **cfg['setting_min_p'])
        setting_typical_p = gr.Number(label="Typical-P", interactive=True, **cfg['setting_typical_p'])
        setting_tfs_z = gr.Number(label="TFS", interactive=True, **cfg['setting_tfs_z'])
    with gr.Row():
        setting_mirostat_mode = gr.Number(label="Mirostat mode", **cfg['setting_mirostat_mode'])
        setting_mirostat_eta = gr.Number(label="Mirostat learning rate", interactive=True, **cfg['setting_mirostat_eta'])
        setting_mirostat_tau = gr.Number(label="Mirostat target entropy", interactive=True, **cfg['setting_mirostat_tau'])


# Fetch the model weights and a prebuilt KV-cache snapshot on first run
# (e.g. on a fresh Hugging Face Space).
if os.path.exists(setting_path.value):
    print(f"The file {setting_path.value} exists.")
else:
    from huggingface_hub import snapshot_download

    os.makedirs("downloads", exist_ok=True)
    os.makedirs("cache", exist_ok=True)
    snapshot_download(repo_id='TheBloke/CausalLM-7B-GGUF', local_dir=r'downloads',
                      allow_patterns='causallm_7b.Q5_K_M.gguf')
    snapshot_download(repo_id='Limour/llama-python-streamingllm-cache', repo_type='dataset', local_dir=r'cache')


model = StreamingLLM(model_path=setting_path.value,
                     seed=setting_seed.value,
                     n_gpu_layers=setting_n_gpu_layers.value,
                     n_ctx=setting_ctx.value)
setting_ctx.value = model.n_ctx()  # read back: the backend may adjust the context size
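
# Note on n_keep / n_discard (assumed StreamingLLM-style behaviour of this
# wrapper): once the KV cache is full, generation keeps the first n_keep
# tokens (the permanent system/character prompt computed below) and evicts
# n_discard tokens after them, so chatting can continue past n_ctx.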


chat_template = ChatTemplate(model)


with gr.Blocks() as role:
    with gr.Row():
        role_usr = gr.Textbox(label="User name", max_lines=1, interactive=False, **cfg['role_usr'])
        role_char = gr.Textbox(label="Character name", max_lines=1, interactive=False, **cfg['role_char'])

    role_char_d = gr.Textbox(lines=10, label="Story description", **cfg['role_char_d'])
    role_chat_style = gr.Textbox(lines=10, label="Reply examples", **cfg['role_chat_style'])


# Warm-start: if a cached session for this config exists, load it and only
# *count* the permanent prompt tokens; otherwise evaluate the prompt from
# scratch and save a fresh cache.
if os.path.exists(setting_cache_path.value):
    # Cache hit: the prompt tokens are already inside the loaded KV cache.
    tmp = model.load_session(setting_cache_path.value)
    print(f'load cache from {setting_cache_path.value} {tmp}')
    tmp = chat_template('system',
                        text_format(role_char_d.value,
                                    char=role_char.value,
                                    user=role_usr.value))
    setting_n_keep.value = len(tmp)
    tmp = chat_template(role_char.value,
                        text_format(role_chat_style.value,
                                    char=role_char.value,
                                    user=role_usr.value))
    setting_n_keep.value += len(tmp)

    chatbot = []
    for one in cfg["role_char_first"]:
        one['name'] = text_format(one['name'],
                                  char=role_char.value,
                                  user=role_usr.value)
        one['value'] = text_format(one['value'],
                                   char=role_char.value,
                                   user=role_usr.value)
        if one['name'] == role_char.value:
            chatbot.append((None, chat_display_format(one['value'])))
        print(one)
else:
    # Cache miss: evaluate the permanent system prompt ...
    tmp = chat_template('system',
                        text_format(role_char_d.value,
                                    char=role_char.value,
                                    user=role_usr.value))
    setting_n_keep.value = model.eval_t(tmp)

    # ... and the reply-style examples, spoken once by the character.
    tmp = chat_template(role_char.value,
                        text_format(role_chat_style.value,
                                    char=role_char.value,
                                    user=role_usr.value))
    setting_n_keep.value += model.eval_t(tmp)  # accumulate, matching the cache-hit branch above

    # Evaluate the character's opening messages and mirror them in the UI.
    chatbot = []
    for one in cfg["role_char_first"]:
        one['name'] = text_format(one['name'],
                                  char=role_char.value,
                                  user=role_usr.value)
        one['value'] = text_format(one['value'],
                                   char=role_char.value,
                                   user=role_usr.value)
        if one['name'] == role_char.value:
            chatbot.append((None, chat_display_format(one['value'])))
        print(one)
        tmp = chat_template(one['name'], one['value'])
        model.eval_t(tmp)

    # Touch the cache file, then persist the freshly built KV cache.
    with open(setting_cache_path.value, 'wb') as f:
        pass
    tmp = model.save_session(setting_cache_path.value)
    print(f'save cache {tmp}')

from huggingface_hub import login, CommitScheduler

login(token=os.environ.get("HF_TOKEN"), write_permission=True)
CommitScheduler(repo_id='Limour/llama-python-streamingllm-cache', repo_type='dataset', folder_path='cache')
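
# CommitScheduler uploads the contents of cache/ to the dataset repo on a
# background schedule (every few minutes by default), so a freshly built
# KV cache survives Space restarts.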


def btn_submit_com(_n_keep, _n_discard,
                   _temperature, _repeat_penalty, _frequency_penalty,
                   _presence_penalty, _repeat_last_n, _top_k,
                   _top_p, _min_p, _typical_p,
                   _tfs_z, _mirostat_mode, _mirostat_eta,
                   _mirostat_tau, _role, _max_tokens):
    # Shared completion driver: stream one reply for ``_role`` and yield the
    # accumulated text after every decodable chunk.
    with lock:
        if not session_active:
            raise RuntimeError('btn_submit_com called without an active session')

    t_bot = chat_template(_role)
    completion_tokens = []  # tokens not yet decodable into valid UTF-8
    history = ''

    for token in model.generate_t(
            tokens=t_bot,
            n_keep=_n_keep,
            n_discard=_n_discard,
            im_start=chat_template.im_start_token,
            top_k=_top_k,
            top_p=_top_p,
            min_p=_min_p,
            typical_p=_typical_p,
            temp=_temperature,
            repeat_penalty=_repeat_penalty,
            repeat_last_n=_repeat_last_n,
            frequency_penalty=_frequency_penalty,
            presence_penalty=_presence_penalty,
            tfs_z=_tfs_z,
            mirostat_mode=_mirostat_mode,
            mirostat_tau=_mirostat_tau,
            mirostat_eta=_mirostat_eta,
    ):
        if token in chat_template.eos or token == chat_template.nlnl:
            t_bot.extend(completion_tokens)
            print('token in eos', token)
            break
        completion_tokens.append(token)
        all_text = model.str_detokenize(completion_tokens)
        if not all_text:
            # Partial multi-byte character; wait for more tokens.
            continue
        t_bot.extend(completion_tokens)
        history += all_text
        yield history
        if token in chat_template.onenl:
            # Stop on a blank line (two consecutive newlines, also covering
            # the \n\r\n case) and pop the trailing newline from the cache.
            if t_bot[-2] in chat_template.onenl:
                model.venv_pop_token()
                break
            if t_bot[-2] in chat_template.onerl and t_bot[-3] in chat_template.onenl:
                model.venv_pop_token()
                break
        if history[-2:] == '\n\n':
            # Fallback: a blank line showed up in the decoded text.
            print('t_bot[-4:]', t_bot[-4:], repr(model.str_detokenize(t_bot[-4:])),
                  repr(model.str_detokenize(t_bot[-1:])))
            break
        if len(t_bot) > _max_tokens:
            break
        completion_tokens = []

    print('history', repr(history))

    # Close the message with the end-of-turn marker.
    model.eval_t(chat_template.im_end_nl, _n_keep, _n_discard)
    t_bot.extend(chat_template.im_end_nl)
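
# btn_submit_com is a generator: the three handlers below drive it and
# forward each partial string to the UI, which is what makes the reply
# stream chunk by chunk.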


def btn_submit_usr(message: str, history):
    # Open a session; only one may be active at a time.
    global session_active
    with lock:
        if session_active:
            raise RuntimeError('a session is already active')
        session_active = True

    if history is None:
        history = []
    return "", history + [[message.strip(), '']], gr.update(interactive=False)


def btn_submit_bot(history, _n_keep, _n_discard,
                   _temperature, _repeat_penalty, _frequency_penalty,
                   _presence_penalty, _repeat_last_n, _top_k,
                   _top_p, _min_p, _typical_p,
                   _tfs_z, _mirostat_mode, _mirostat_eta,
                   _mirostat_tau, _usr, _char,
                   _rag, _max_tokens):
    with lock:
        if not session_active:
            raise RuntimeError('btn_submit_bot called without an active session')

    # Optional RAG context goes into its own venv layer so it can be removed
    # from the KV cache after the reply is finished.
    rag_idx = None
    if len(_rag) > 0:
        rag_idx = model.venv_create()
        t_rag = chat_template('system', _rag)
        model.eval_t(t_rag, _n_keep, _n_discard)
    model.venv_create()

    # Evaluate the user's message.
    t_msg = history[-1][0]
    t_msg = chat_template(_usr, t_msg)
    model.eval_t(t_msg, _n_keep, _n_discard)

    # Stream the character's reply into the last history slot.
    _tmp = btn_submit_com(_n_keep, _n_discard,
                          _temperature, _repeat_penalty, _frequency_penalty,
                          _presence_penalty, _repeat_last_n, _top_k,
                          _top_p, _min_p, _typical_p,
                          _tfs_z, _mirostat_mode, _mirostat_eta,
                          _mirostat_tau, _char, _max_tokens)
    for _h in _tmp:
        history[-1][1] = _h
        yield history, str((model.n_tokens, model.venv))

    history[-1][1] = chat_display_format(history[-1][1])
    yield history, str((model.n_tokens, model.venv))

    # Drop the previous voice-over layer, then the RAG layer, and merge the
    # rest into the permanent context.
    if vo_idx > 0:
        print('vo_idx', vo_idx, model.venv)
        model.venv_remove(vo_idx)
        print('vo_idx', vo_idx, model.venv)
        if rag_idx and vo_idx < rag_idx:
            rag_idx -= 1

    if rag_idx is not None:
        model.venv_remove(rag_idx)
    model.venv_disband()
    yield history, str((model.n_tokens, model.venv))
    print('venv_disband', vo_idx, model.venv)


def btn_rag_(_rag, _msg):
    # Placeholder retrieval hook: nothing is wired up yet, so clicking RAG
    # simply clears the RAG box.
    retn = ''
    return retn
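
# A minimal sketch of what this hook could do instead, assuming a
# hypothetical retriever object (not part of this repo):
#     def btn_rag_(_rag, _msg):
#         return '\n'.join(doc.text for doc in retriever.search(_msg, top_k=3))
# The returned string is later evaluated as extra 'system' context by
# btn_submit_bot.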


vo_idx = 0  # venv index of the current voice-over layer (0 = none)


def btn_submit_vo(_n_keep, _n_discard,
                  _temperature, _repeat_penalty, _frequency_penalty,
                  _presence_penalty, _repeat_last_n, _top_k,
                  _top_p, _min_p, _typical_p,
                  _tfs_z, _mirostat_mode, _mirostat_eta,
                  _mirostat_tau, _max_tokens):
    with lock:
        if not session_active:
            raise RuntimeError('btn_submit_vo called without an active session')
    global vo_idx
    vo_idx = model.venv_create()

    # Generate the narrator voice-over (role '旁白', "narrator") in its own
    # venv layer; btn_submit_bot removes it on the next turn.
    _tmp = btn_submit_com(_n_keep, _n_discard,
                          _temperature, _repeat_penalty, _frequency_penalty,
                          _presence_penalty, _repeat_last_n, _top_k,
                          _top_p, _min_p, _typical_p,
                          _tfs_z, _mirostat_mode, _mirostat_eta,
                          _mirostat_tau, '旁白', _max_tokens)
    for _h in _tmp:
        yield _h, str((model.n_tokens, model.venv))


def btn_submit_suggest(_n_keep, _n_discard,
                       _temperature, _repeat_penalty, _frequency_penalty,
                       _presence_penalty, _repeat_last_n, _top_k,
                       _top_p, _min_p, _typical_p,
                       _tfs_z, _mirostat_mode, _mirostat_eta,
                       _mirostat_tau, _usr, _max_tokens):
    with lock:
        if not session_active:
            raise RuntimeError('btn_submit_suggest called without an active session')
    model.venv_create()

    # Draft a suggested user reply in a throwaway venv layer, then discard
    # the layer so the suggestion never enters the permanent context.
    _tmp = btn_submit_com(_n_keep, _n_discard,
                          _temperature, _repeat_penalty, _frequency_penalty,
                          _presence_penalty, _repeat_last_n, _top_k,
                          _top_p, _min_p, _typical_p,
                          _tfs_z, _mirostat_mode, _mirostat_eta,
                          _mirostat_tau, _usr, _max_tokens)
    _h = ''
    for _h in _tmp:
        yield _h, str((model.n_tokens, model.venv))
    model.venv_remove()
    yield _h, str((model.n_tokens, model.venv))


def btn_submit_finish():
    # Close the session and re-enable the Submit button.
    global session_active
    with lock:
        if not session_active:
            raise RuntimeError('btn_submit_finish called without an active session')
        session_active = False
    return gr.update(interactive=True)


with gr.Blocks() as chatting:
    with gr.Row(equal_height=True):
        # value=chatbot reuses the opening messages built during warm-start;
        # the name is then rebound to the Chatbot component itself.
        chatbot = gr.Chatbot(height='60vh', scale=2, value=chatbot,
                             avatar_images=(r'assets/user.png', r'assets/chatbot.webp'))
        with gr.Column(scale=1, elem_id="area"):
            rag = gr.Textbox(label='RAG', show_copy_button=True, elem_id="RAG-area")
            vo = gr.Textbox(label='VO', show_copy_button=True, elem_id="VO-area")
            s_info = gr.Textbox(value=str((model.n_tokens, model.venv)), max_lines=1, label='info', interactive=False)
    msg = gr.Textbox(label='Prompt', lines=2, max_lines=2, elem_id='prompt', autofocus=True, **cfg['msg'])
    with gr.Row():
        btn_rag = gr.Button("RAG")
        btn_submit = gr.Button("Submit")
        btn_retry = gr.Button("Retry")  # no handler wired up yet
        btn_com1 = gr.Button("Custom 1")
        btn_com2 = gr.Button("Custom 2")
        btn_com3 = gr.Button("Custom 3")

    btn_rag.click(fn=btn_rag_, outputs=rag,
                  inputs=[rag, msg])

    btn_submit.click(
        fn=btn_submit_usr, api_name="submit",
        inputs=[msg, chatbot],
        outputs=[msg, chatbot, btn_submit]
    ).success(
        fn=btn_submit_bot,
        inputs=[chatbot, setting_n_keep, setting_n_discard,
                setting_temperature, setting_repeat_penalty, setting_frequency_penalty,
                setting_presence_penalty, setting_repeat_last_n, setting_top_k,
                setting_top_p, setting_min_p, setting_typical_p,
                setting_tfs_z, setting_mirostat_mode, setting_mirostat_eta,
                setting_mirostat_tau, role_usr, role_char,
                rag, setting_max_tokens],
        outputs=[chatbot, s_info]
    ).success(
        fn=btn_submit_vo,
        inputs=[setting_n_keep, setting_n_discard,
                setting_temperature, setting_repeat_penalty, setting_frequency_penalty,
                setting_presence_penalty, setting_repeat_last_n, setting_top_k,
                setting_top_p, setting_min_p, setting_typical_p,
                setting_tfs_z, setting_mirostat_mode, setting_mirostat_eta,
                setting_mirostat_tau, setting_max_tokens],
        outputs=[vo, s_info]
    ).success(
        fn=btn_submit_suggest,
        inputs=[setting_n_keep, setting_n_discard,
                setting_temperature, setting_repeat_penalty, setting_frequency_penalty,
                setting_presence_penalty, setting_repeat_last_n, setting_top_k,
                setting_top_p, setting_min_p, setting_typical_p,
                setting_tfs_z, setting_mirostat_mode, setting_mirostat_eta,
                setting_mirostat_tau, role_usr, setting_max_tokens],
        outputs=[msg, s_info]
    ).success(
        fn=btn_submit_finish,
        outputs=btn_submit
    )
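
    # The Submit pipeline runs as a chain of .success() callbacks:
    # 1. btn_submit_usr locks the session and shows the pending message;
    # 2. btn_submit_bot streams the character's reply (with optional RAG);
    # 3. btn_submit_vo streams a narrator voice-over into the VO box;
    # 4. btn_submit_suggest drafts a suggested user reply into the prompt box;
    # 5. btn_submit_finish releases the session and re-enables Submit.
    # Each step only fires if the previous one did not raise.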

    @btn_com2.click(inputs=setting_cache_path,
                    outputs=[s_info, btn_submit])
    def btn_com2(_cache_path):
        # Custom 2: roll the model back to the saved KV cache and reset the
        # session state; restart the Space if anything goes wrong.
        try:
            with lock:
                _tmp = model.load_session(_cache_path)
                print(f'load cache from {_cache_path} {_tmp}')
                global vo_idx
                vo_idx = 0
                model.venv = [0]
                global session_active
                session_active = False
                return str((model.n_tokens, model.venv)), gr.update(interactive=True)
        except Exception as e:
            restart_space()
            raise e

    @btn_com3.click()
    def btn_com3():
        # Custom 3: manually restart the Space.
        restart_space()


demo = gr.TabbedInterface([chatting, setting, role],
                          ["Chat", "Settings", "Role"],
                          css=custom_css)
gr.close_all()
demo.queue(max_size=1).launch(share=False)
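
# queue(max_size=1) allows at most one waiting event (excess requests are
# rejected), which, together with the global lock, serializes access to the
# single llama.cpp context.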