Cran-May committed on
Commit 4f5ce81
1 Parent(s): 34c2963

Update app.py

Files changed (1):
  1. app.py +7 -241
app.py CHANGED
@@ -2,249 +2,15 @@ from typing import Iterator
 
 import gradio as gr
 
-from model import run
-
-DEFAULT_SYSTEM_PROMPT = ""
-MAX_MAX_NEW_TOKENS = 2048
-DEFAULT_MAX_NEW_TOKENS = 1024
-MAX_INPUT_TOKEN_LENGTH = 4000
-
-DESCRIPTION = """
-# 玉刚六号改/yugangVI-Chat
-"""
-LICENSE = "基于Baichuan-13B-Chat以及https://github.com/ouwei2013/baichuan13b.cpp"
-
-
-def clear_and_save_textbox(message: str) -> tuple[str, str]:
-    return '', message
-
-
-def display_input(message: str,
-                  history: list[tuple[str, str]]) -> list[tuple[str, str]]:
-    history.append((message, ''))
-    return history
-
-
-def delete_prev_fn(
-        history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
-    try:
-        message, _ = history.pop()
-    except IndexError:
-        message = ''
-    return history, message or ''
-
-
-def generate(
-    message: str,
-    history_with_input: list[tuple[str, str]],
-    system_prompt: str,
-    max_new_tokens: int,
-    temperature: float,
-    top_p: float,
-    top_k: int,
-) -> Iterator[list[tuple[str, str]]]:
-    history = history_with_input[:-1]
-    generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k)
-    for response in generator:
-        yield history + [(message, response)]
-
-
-def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
-    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 8192, 1, 0.95, 50)
-    for x in generator:
-        pass
-    return '', x
-
-
-def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
-    a = 1
-
-
-with gr.Blocks(css='style.css') as demo:
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value='Duplicate Space for private use',
-                       elem_id='duplicate-button')
-
-    with gr.Group():
-        chatbot = gr.Chatbot(label='Chatbot')
-        with gr.Row():
-            textbox = gr.Textbox(
-                container=False,
-                show_label=False,
-                placeholder='请输入/Type a message...',
-                scale=10,
-            )
-            submit_button = gr.Button('提交/Submit',
-                                      variant='primary',
-                                      scale=1,
-                                      min_width=0)
-    with gr.Row():
-        retry_button = gr.Button('🔄 重来/Retry', variant='secondary')
-        undo_button = gr.Button('↩️ 撤销/Undo', variant='secondary')
-        clear_button = gr.Button('🗑️ 清除/Clear', variant='secondary')
-
-    saved_input = gr.State()
-
-    with gr.Accordion(label='进阶设置/Advanced options', open=False):
-        system_prompt = gr.Textbox(label='预设引导词/System prompt',
-                                   value=DEFAULT_SYSTEM_PROMPT,
-                                   lines=6)
-        max_new_tokens = gr.Slider(
-            label='Max new tokens',
-            minimum=1,
-            maximum=MAX_MAX_NEW_TOKENS,
-            step=1,
-            value=DEFAULT_MAX_NEW_TOKENS,
-        )
-        temperature = gr.Slider(
-            label='情感温度/Temperature',
-            minimum=0.1,
-            maximum=4.0,
-            step=0.1,
-            value=0.3,
-        )
-        top_p = gr.Slider(
-            label='Top-p (nucleus sampling)',
-            minimum=0.05,
-            maximum=1.0,
-            step=0.05,
-            value=0.85,
-        )
-        top_k = gr.Slider(
-            label='Top-k',
-            minimum=1,
-            maximum=1000,
-            step=1,
-            value=5,
-        )
-
-    gr.Examples(
-        examples=[
-            '中华人民共和国的首都是?',
-        ],
-        inputs=textbox,
-        outputs=[textbox, chatbot],
-        fn=process_example,
-        cache_examples=True,
-    )
-
-    gr.Markdown(LICENSE)
-
-    textbox.submit(
-        fn=clear_and_save_textbox,
-        inputs=textbox,
-        outputs=[textbox, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=display_input,
-        inputs=[saved_input, chatbot],
-        outputs=chatbot,
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=check_input_token_length,
-        inputs=[saved_input, chatbot, system_prompt],
-        api_name=False,
-        queue=False,
-    ).success(
-        fn=generate,
-        inputs=[
-            saved_input,
-            chatbot,
-            system_prompt,
-            max_new_tokens,
-            temperature,
-            top_p,
-            top_k,
-        ],
-        outputs=chatbot,
-        api_name=False,
-    )
-
-    button_event_preprocess = submit_button.click(
-        fn=clear_and_save_textbox,
-        inputs=textbox,
-        outputs=[textbox, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=display_input,
-        inputs=[saved_input, chatbot],
-        outputs=chatbot,
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=check_input_token_length,
-        inputs=[saved_input, chatbot, system_prompt],
-        api_name=False,
-        queue=False,
-    ).success(
-        fn=generate,
-        inputs=[
-            saved_input,
-            chatbot,
-            system_prompt,
-            max_new_tokens,
-            temperature,
-            top_p,
-            top_k,
-        ],
-        outputs=chatbot,
-        api_name=False,
-    )
-
-    retry_button.click(
-        fn=delete_prev_fn,
-        inputs=chatbot,
-        outputs=[chatbot, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=display_input,
-        inputs=[saved_input, chatbot],
-        outputs=chatbot,
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=generate,
-        inputs=[
-            saved_input,
-            chatbot,
-            system_prompt,
-            max_new_tokens,
-            temperature,
-            top_p,
-            top_k,
-        ],
-        outputs=chatbot,
-        api_name=False,
-    )
-
-    undo_button.click(
-        fn=delete_prev_fn,
-        inputs=chatbot,
-        outputs=[chatbot, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=lambda x: x,
-        inputs=[saved_input],
-        outputs=textbox,
-        api_name=False,
-        queue=False,
-    )
-
-    clear_button.click(
-        fn=lambda: ([], ''),
-        outputs=[chatbot, saved_input],
-        queue=False,
-        api_name=False,
-    )
-
-demo.queue(max_size=20).launch()
+from typing import Iterator
+
+import os
+import subprocess
+
+from huggingface_hub import snapshot_download, hf_hub_download
+
+model_id = 'sam-ezai/MindChat-Qwen-7B-v2-GGML'
+
+# Fetch the quantized model, its tiktoken vocabulary, and a prebuilt inference binary.
+hf_hub_download(model_id, local_dir="./", filename="MindChat-Qwen-7B-v2.Q4_0.ggml")
+hf_hub_download(model_id, local_dir="./", filename="qwen.tiktoken")
+hf_hub_download(repo_id="twodgirl/Qwen-14b-GGML", local_dir="./", filename="main")
+
+# The commit left this step as a bare shell line (./main -m ... --tiktoken ... -i),
+# which is not valid Python. Mark the downloaded binary executable and launch it
+# through subprocess instead.
+os.chmod("./main", 0o755)
+subprocess.run(["./main", "-m", "./MindChat-Qwen-7B-v2.Q4_0.ggml",
+                "--tiktoken", "./qwen.tiktoken", "-i"], check=True)
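
Note that the new app.py keeps `import gradio as gr` but never uses it, and the binary it downloads runs in interactive terminal mode. A minimal sketch of how the two could be bridged follows; this is purely an assumption, since only the `-m`, `--tiktoken`, and `-i` flags are confirmed by this diff, and the one-line-per-reply stdin/stdout behaviour assumed below is a guess rather than documented behaviour of the binary.

import subprocess

import gradio as gr

# Hypothetical bridge between the interactive binary and a Gradio UI.
# Flags are taken from the commit; the reply framing is an assumption.
proc = subprocess.Popen(
    ["./main", "-m", "./MindChat-Qwen-7B-v2.Q4_0.ggml",
     "--tiktoken", "./qwen.tiktoken", "-i"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
    bufsize=1,  # line-buffered, so each reply can be read as it appears
)

def ask(prompt: str) -> str:
    """Send one prompt to the binary and return its next output line."""
    proc.stdin.write(prompt + "\n")
    proc.stdin.flush()
    return proc.stdout.readline().strip()

gr.Interface(fn=ask, inputs="text", outputs="text").launch()

Keeping one long-lived process like this leaves the model loaded across requests, unlike spawning a fresh process per prompt; if the binary prints a startup banner or multi-line replies, the readline-based bridge would need a real end-of-reply marker.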