Limour committed on
Commit
67ae2ac
1 Parent(s): 6481b74

Upload 2 files

Browse files
gradio_streamingllm.py CHANGED
@@ -25,6 +25,9 @@ from mods.btn_suggest import init as btn_suggest_init
25
  # ========== 融合功能的按钮 ==========
26
  from mods.btn_submit_vo_suggest import init as btn_submit_vo_suggest_init
27
 
 
 
 
28
  # ========== 重置按钮 ==========
29
  from mods.btn_reset import init as btn_reset_init
30
 
@@ -98,10 +101,17 @@ with gr.Blocks() as chatting:
98
  with gr.Row(equal_height=True):
99
  cfg['chatbot'] = gr.Chatbot(height='60vh', scale=2, value=cfg['chatbot'],
100
  avatar_images=(r'assets/user.png', r'assets/chatbot.webp'))
101
- with gr.Column(scale=1, elem_id="area"):
102
- cfg['rag'] = gr.Textbox(label='RAG', lines=2, show_copy_button=True, elem_id="RAG-area")
103
- cfg['vo'] = gr.Textbox(label='VO', show_copy_button=True, elem_id="VO-area")
104
- cfg['s_info'] = gr.Textbox(value=cfg['model'].venv_info, max_lines=1, label='info', interactive=False)
 
 
 
 
 
 
 
105
  cfg['msg'] = gr.Textbox(label='Prompt', lines=2, max_lines=2, elem_id='prompt', autofocus=True, **cfg['msg'])
106
 
107
  cfg['gr'] = gr
@@ -119,37 +129,27 @@ with gr.Blocks() as chatting:
119
 
120
  btn_submit_vo_suggest_init(cfg)
121
 
 
 
122
  # ========== 用于调试 ==========
123
  btn_reset_init(cfg)
124
 
125
  # ========== 让聊天界面的文本框等高 ==========
126
  custom_css = r'''
127
- #area > div {
128
- height: 100%;
129
- }
130
- #RAG-area {
131
- flex-grow: 1;
132
- }
133
- #RAG-area > label {
134
- height: 100%;
135
- display: flex;
136
- flex-direction: column;
137
  }
138
- #RAG-area > label > textarea {
139
  flex-grow: 1;
140
- max-height: 20vh;
141
  }
142
- #VO-area {
143
- flex-grow: 1;
144
- }
145
- #VO-area > label {
146
  height: 100%;
147
  display: flex;
148
  flex-direction: column;
 
149
  }
150
- #VO-area > label > textarea {
151
  flex-grow: 1;
152
- max-height: 20vh;
153
  }
154
  #prompt > label > textarea {
155
  max-height: 63px;
@@ -162,6 +162,9 @@ custom_css = r'''
162
  .setting input {
163
  margin-top: auto;
164
  }
 
 
 
165
  '''
166
 
167
  # ========== 开始运行 ==========
 
25
  # ========== 融合功能的按钮 ==========
26
  from mods.btn_submit_vo_suggest import init as btn_submit_vo_suggest_init
27
 
28
+ # ========== 更新状态栏的按钮 ==========
29
+ from mods.btn_status_bar import init as btn_status_bar_init
30
+
31
  # ========== 重置按钮 ==========
32
  from mods.btn_reset import init as btn_reset_init
33
 
 
101
  with gr.Row(equal_height=True):
102
  cfg['chatbot'] = gr.Chatbot(height='60vh', scale=2, value=cfg['chatbot'],
103
  avatar_images=(r'assets/user.png', r'assets/chatbot.webp'))
104
+ with gr.Column(scale=1):
105
+ with gr.Tab(label='Main', elem_id='area'):
106
+ cfg['rag'] = gr.Textbox(label='RAG', lines=2, show_copy_button=True, elem_classes="area")
107
+ cfg['vo'] = gr.Textbox(label='VO', lines=2, show_copy_button=True, elem_classes="area")
108
+ cfg['s_info'] = gr.Textbox(value=cfg['model'].venv_info, max_lines=1, label='info', interactive=False)
109
+ with gr.Tab(label='状态栏', elem_id='area'):
110
+ cfg['status_bar'] = gr.Dataframe(
111
+ headers=['属性', '值'],
112
+ type="array",
113
+ elem_id='StatusBar'
114
+ )
115
  cfg['msg'] = gr.Textbox(label='Prompt', lines=2, max_lines=2, elem_id='prompt', autofocus=True, **cfg['msg'])
116
 
117
  cfg['gr'] = gr
 
129
 
130
  btn_submit_vo_suggest_init(cfg)
131
 
132
+ btn_status_bar_init(cfg)
133
+
134
  # ========== 用于调试 ==========
135
  btn_reset_init(cfg)
136
 
137
  # ========== 让聊天界面的文本框等高 ==========
138
  custom_css = r'''
139
+ #area > div > div {
140
+ height: 53vh;
 
 
 
 
 
 
 
 
141
  }
142
+ .area {
143
  flex-grow: 1;
 
144
  }
145
+ .area > label {
 
 
 
146
  height: 100%;
147
  display: flex;
148
  flex-direction: column;
149
+ max-height: 16vh;
150
  }
151
+ .area > label > textarea {
152
  flex-grow: 1;
 
153
  }
154
  #prompt > label > textarea {
155
  max-height: 63px;
 
162
  .setting input {
163
  margin-top: auto;
164
  }
165
+ #StatusBar {
166
+ max-height: 53vh;
167
+ }
168
  '''
169
 
170
  # ========== 开始运行 ==========
llama_cpp_python_streamingllm.py CHANGED
@@ -57,15 +57,15 @@ class StreamingLLM(Llama):
57
  self.kv_cache_seq_trim()
58
  return True
59
 
60
- def venv_remove(self, name: str, keep_last=False):
61
  if len(self.venv) <= 1:
62
  return False
63
  if name not in self.venv_idx_map:
64
  return False
65
  venv_idx = self.venv_idx_map.index(name) + 1
66
  while self.venv_idx_map:
67
- if keep_last and self.venv_idx_map.count(name) <= 1:
68
- break # 保留最后一个
69
  self.venv_idx_map.pop(venv_idx - 1) # 删除
70
  if venv_idx == len(self.venv) - 1:
71
  # 最后一层
@@ -282,10 +282,9 @@ class StreamingLLM(Llama):
282
  self._input_ids, self._scores[-1, :]
283
  ):
284
  return
285
- tokens_or_none = yield token
286
- tokens = [token]
287
- if tokens_or_none is not None:
288
- tokens.extend(tokens_or_none)
289
 
290
  def load_session(self, filepath: str):
291
  n_tokens = POINTER(llama_cpp.c_size_t)(llama_cpp.c_size_t(0))
 
57
  self.kv_cache_seq_trim()
58
  return True
59
 
60
+ def venv_remove(self, name: str, keep_last=0):
61
  if len(self.venv) <= 1:
62
  return False
63
  if name not in self.venv_idx_map:
64
  return False
65
  venv_idx = self.venv_idx_map.index(name) + 1
66
  while self.venv_idx_map:
67
+ if keep_last and self.venv_idx_map.count(name) <= keep_last:
68
+ break # 保留最后n个
69
  self.venv_idx_map.pop(venv_idx - 1) # 删除
70
  if venv_idx == len(self.venv) - 1:
71
  # 最后一层
 
282
  self._input_ids, self._scores[-1, :]
283
  ):
284
  return
285
+ tokens = yield token
286
+ if tokens is None:
287
+ tokens = [token]
 
288
 
289
  def load_session(self, filepath: str):
290
  n_tokens = POINTER(llama_cpp.c_size_t)(llama_cpp.c_size_t(0))