OedoSoldier committed on
Commit
27f37de
1 Parent(s): cdb5c54

Init commit

Files changed (5)
  1. LICENSE +28 -0
  2. README.md +19 -13
  3. app.py +207 -0
  4. config.json +6 -0
  5. requirements.txt +6 -0
LICENSE ADDED
@@ -0,0 +1,28 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2023, OedoSoldier
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+    list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
README.md CHANGED
@@ -1,13 +1,19 @@
- ---
- title: Chatglm Webui
- emoji: 📊
- colorFrom: yellow
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.21.0
- app_file: app.py
- pinned: false
- license: bsd-3-clause
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # ChatGLM WebUI
+
+ A simple [ChatGLM](https://github.com/THUDM/ChatGLM-6B) WebUI based on [ChatGPT WebUI](https://github.com/dotmet/chatgpt_webui).
+
+ ## Installation
+
+ `pip install -r requirements.txt`
+
+ The Gradio version must be 3.21.0 or later!
+
+ Running on CPU under Windows requires a compiler; see (https://www.freesion.com/article/4185669814/) for reference.
+
+ ## Running
+
+ Download the model files into the root directory with `git clone https://huggingface.co/THUDM/chatglm-6b-int4`, then run `python app.py`. The default settings need at least 4 GB of VRAM (or 5.2 GB of RAM when running on CPU).
+
+ ### Arguments
+
+ - `--path`: the folder containing the model
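+
+ For example (the folder name here is illustrative): `python app.py --path ./chatglm-6b-int4` points the app at an int4 checkpoint cloned into a local `chatglm-6b-int4` directory.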
app.py ADDED
@@ -0,0 +1,207 @@
+ import argparse
+ import os
+ import json
+ import datetime
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModel
+
+
+ def get_args():
+     parser = argparse.ArgumentParser(description='ChatGLM Arguments')
+
+     parser.add_argument('--path', default='THUDM/chatglm-6b-int4', help='The path of ChatGLM model')
+
+     return parser.parse_args()
+
+
+ args = get_args()
+
+ if not os.path.isdir(args.path):
+     raise FileNotFoundError('Model not found')
+
+ if torch.cuda.is_available():
+     device = 'cuda'
+ else:
+     device = 'cpu'
+
+ tokenizer = AutoTokenizer.from_pretrained(args.path, trust_remote_code=True)
+
+ # fp16 on GPU; CPU inference needs fp32.
+ if device == 'cuda':
+     model = AutoModel.from_pretrained(args.path, trust_remote_code=True).half().cuda()
+ else:
+     model = AutoModel.from_pretrained(args.path, trust_remote_code=True).float()
+
+ model = model.eval()
+
+
+ def parse_text(text):
+     """Convert markdown-style code fences in `text` to HTML for the chatbot."""
+     lines = text.split('\n')
+     for i, line in enumerate(lines):
+         if '```' in line:
+             # The opening fence carries the language tag; the closing fence is bare.
+             item = line.split('`')[-1]
+             if item:
+                 lines[i] = f'<pre><code class="{item}">'
+             else:
+                 lines[i] = '</code></pre>'
+         else:
+             if i > 0:
+                 line = line.replace('<', '&lt;').replace('>', '&gt;')
+                 lines[i] = f'<br/>{line}'
+     return ''.join(lines)
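+
+ # Illustrative example (not part of the original commit):
+ #   parse_text('hi\n```python\nprint(1)\n```')
+ #   returns 'hi<pre><code class="python"><br/>print(1)</code></pre>'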
+
+
+ def chat_wrapper(query, styled_history, history, max_length, top_p, temperature, memory_limit):
+     if query == '':
+         # This is a generator function, so yield (not return) so Gradio gets the update.
+         yield [], [], '', *gr_hide()
+         return
+     # memory_limit: 0 drops all context, N > 0 keeps the last N turns,
+     # a negative value keeps the full history.
+     memory_limit = int(memory_limit)  # the slider may hand back a float; slicing needs an int
+     if memory_limit == 0:
+         history = []
+         styled_history = []
+     elif memory_limit > 0:
+         history = history[-memory_limit:]
+         styled_history = styled_history[-memory_limit:]
+     flag = True
+     styled_history_pos = 0
+     for message, history in model.stream_chat(tokenizer, query, history=history,
+                                               max_length=max_length, top_p=top_p, temperature=temperature):
+         if flag:
+             styled_history.append((parse_text(query), parse_text(message)))
+             styled_history_pos = len(styled_history) - 1
+             flag = False
+         else:
+             styled_history[styled_history_pos] = (parse_text(query), parse_text(message))
+         yield styled_history, history, '', *gr_hide()
+
+
+ def regenerate_wrapper(styled_history, history, max_length, top_p, temperature, memory_limit):
+     if not history:
+         # Generator function: yield so Gradio receives the cleared state.
+         yield [], [], '', *gr_hide()
+         return
+
+     styled_history, history, query, *_ = edit_wrapper(styled_history, history)
+     for ret in chat_wrapper(query, styled_history, history, max_length, top_p, temperature, memory_limit):
+         yield ret
+
+
+ def edit_wrapper(styled_history, history):
+     if len(history) == 0:
+         # Match the arity of the non-empty branch (callers expect six values).
+         return [], [], '', *gr_hide()
+     query = history[-1][0]
+     history = history[:-1]
+     styled_history = styled_history[:-1]
+     return styled_history, history, query, *gr_hide()
+
+
+ def reset_history():
+     return [], [], '', *gr_hide()
+
+
+ def save_history(history):
+     os.makedirs('log', exist_ok=True)
+     dict_list = [{'input': q, 'output': a} for q, a in history]
+     with open(f'log/{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.json', 'w', encoding='utf-8') as f:
+         json.dump(dict_list, f, ensure_ascii=False, indent=2)
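+
+ # Example of a saved log (filename and contents illustrative), e.g. log/2023-03-20-12-00-00.json:
+ #   [
+ #     {"input": "hello", "output": "Hi, how can I help you?"}
+ #   ]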
+
+
+ def save_config(max_length=2048, top_p=0.7, temperature=0.95, memory_limit=-1):
+     with open('config.json', 'w') as f:
+         json.dump({'max_length': max_length, 'top_p': top_p, 'temperature': temperature, 'memory_limit': memory_limit}, f, indent=2)
+
+
+ def load_history(file, styled_history, history):
+     current_styled_history, current_history = styled_history.copy(), history.copy()
+     try:
+         with open(file.name, 'r', encoding='utf-8') as f:
+             dict_list = json.load(f)
+         history = [(item['input'], item['output']) for item in dict_list]
+         styled_history = [(parse_text(item['input']), parse_text(item['output'])) for item in dict_list]
+     except Exception:
+         # Malformed file: keep the current conversation untouched.
+         return current_styled_history, current_history, '', *gr_hide()
+     return styled_history, history, '', *gr_hide()
+
+
+ def gr_show_and_load(history, evt: gr.SelectData):
+     # Column 0 of a chatbot row is the question, column 1 the answer.
+     if evt.index[1] == 0:
+         label = f'Editing Q{evt.index[0]}:'
+     else:
+         label = f'Editing A{evt.index[0]}:'
+     return {'visible': True, '__type__': 'update'}, {'value': history[evt.index[0]][evt.index[1]], 'label': label, '__type__': 'update'}, evt.index
+
+
+ def update_history(styled_history, history, log, idx):
+     if log == '':
+         return styled_history, history, {'visible': True, '__type__': 'update'}, {'value': history[idx[0]][idx[1]], '__type__': 'update'}, idx
+
+     def swap_value(lst, idx, value):
+         lst[idx[0]] = tuple(value if j == idx[1] else elem for j, elem in enumerate(lst[idx[0]]))
+         return lst
+     styled_history = swap_value(styled_history, idx, parse_text(log))
+     history = swap_value(history, idx, log)
+     return styled_history, history, *gr_hide()
+
+
+ def gr_hide():
+     return {'visible': False, '__type__': 'update'}, {'value': '', 'label': '', '__type__': 'update'}, []
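+
+ # Note (added for clarity): in Gradio 3.x, dicts carrying '__type__': 'update'
+ # are the plain-dict form of gr.update(); e.g. gr_hide()'s first value behaves
+ # like gr.update(visible=False).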
+
+
+ with gr.Blocks() as demo:
+     if not os.path.isfile('config.json'):
+         save_config()
+
+     with open('config.json', 'r', encoding='utf-8') as f:
+         configs = json.loads(f.read())
+
+     gr.Markdown('''<h1><center>ChatGLM Demo</center></h1>''')
+
+     with gr.Row():
+         max_length = gr.Slider(minimum=4.0, maximum=4096.0, step=4.0, label='Max Length', value=configs['max_length'])
+         top_p = gr.Slider(minimum=0.01, maximum=1.0, step=0.01, label='Top P', value=configs['top_p'])
+         temperature = gr.Slider(minimum=0.01, maximum=2.0, step=0.01, label='Temperature', value=configs['temperature'])
+         memory_limit = gr.Slider(minimum=-1.0, maximum=20.0, step=1.0, label='Memory Limit', value=configs['memory_limit'])
+         save_conf = gr.Button('Save settings', visible=False)
+
+     gr.Markdown('''<h2>Hint: click on a chat bubble to edit chat history</h2>''')
+
+     state = gr.State([])
+     chatbot = gr.Chatbot(elem_id='chatbot', show_label=False)
+     with gr.Row(visible=False) as edit_log:
+         with gr.Column():
+             log = gr.Textbox()
+             with gr.Row():
+                 submit_log = gr.Button('Save')
+                 cancel_log = gr.Button('Cancel')
+     log_idx = gr.State([])
+
+     message = gr.Textbox(placeholder='Input your message', label='Q:')
+
+     with gr.Row():
+         submit = gr.Button('Submit')
+         edit = gr.Button('Edit last question')
+         regen = gr.Button('Re-generate')
+
+     delete = gr.Button('Reset chat')
+
+     with gr.Row():
+         save = gr.Button('Save conversation (to the `log` folder)', visible=False)
+         load = gr.UploadButton('Load conversation', file_types=['file'], file_count='single', visible=False)
+
+     input_list = [message, chatbot, state, max_length, top_p, temperature, memory_limit]
+     output_list = [chatbot, state, message]
+     edit_list = [edit_log, log, log_idx]
+
+     save_conf.click(save_config, inputs=input_list[3:])
+     load.upload(load_history, inputs=[load, chatbot, state], outputs=output_list + edit_list)
+     save.click(save_history, inputs=[state])
+     message.submit(chat_wrapper, inputs=input_list, outputs=output_list + edit_list)
+     submit.click(chat_wrapper, inputs=input_list, outputs=output_list + edit_list)
+     edit.click(edit_wrapper, inputs=input_list[1:3], outputs=output_list + edit_list)
+     regen.click(regenerate_wrapper, inputs=input_list[1:], outputs=output_list + edit_list)
+     delete.click(reset_history, outputs=output_list + edit_list)
+     chatbot.select(gr_show_and_load, inputs=[state], outputs=edit_list)
+     edit_kwargs = {'inputs': [chatbot, state, log, log_idx], 'outputs': [chatbot, state] + edit_list}
+     log.submit(update_history, **edit_kwargs)
+     submit_log.click(update_history, **edit_kwargs)
+     cancel_log.click(gr_hide, outputs=edit_list)
+
+
+ if __name__ == '__main__':
+     demo.queue(concurrency_count=5, max_size=20).launch(debug=True)
config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "max_length": 2048,
+   "top_p": 0.7,
+   "temperature": 0.95,
+   "memory_limit": -1
+ }
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ protobuf>=3.19.5,<3.20.1
+ transformers>=4.26.1
+ icetk
+ cpm_kernels
+ torch>=1.10
+ gradio>=3.21.0