Dorado607 committed
Commit 6a65971
1 Parent(s): a8e4844

update to the latest version

.gitattributes DELETED
@@ -1,34 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
assets/Kelpy-Codos.js DELETED
@@ -1,76 +0,0 @@
- // ==UserScript==
- // @name         Kelpy Codos
- // @namespace    https://github.com/Keldos-Li/Kelpy-Codos
- // @version      1.0.5
- // @author       Keldos; https://keldos.me/
- // @description  Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially.
- //               Based on Chuanhu ChatGPT version: ac04408 (2023-3-22)
- // @license      GPL-3.0
- // @grant        none
- // ==/UserScript==
-
- (function () {
-     'use strict';
-
-     function addCopyButton(pre) {
-         var code = pre.querySelector('code');
-         if (!code) {
-             return; // do not add the button if no <code> element is found
-         }
-         var firstChild = code.firstChild;
-         if (!firstChild) {
-             return; // do not add the button if the <code> element has no child nodes
-         }
-         var button = document.createElement('button');
-         button.textContent = '\uD83D\uDCCE'; // use the 📎 symbol as the text of the "copy" button
-         button.style.position = 'relative';
-         button.style.float = 'right';
-         button.style.fontSize = '1em'; // optional: adjust the button size
-         button.style.background = 'none'; // optional: remove the background color
-         button.style.border = 'none'; // optional: remove the border
-         button.style.cursor = 'pointer'; // optional: show a pointer cursor
-         button.addEventListener('click', function () {
-             var range = document.createRange();
-             range.selectNodeContents(code);
-             range.setStartBefore(firstChild); // set the range to start before the first child node
-             var selection = window.getSelection();
-             selection.removeAllRanges();
-             selection.addRange(range);
-
-             try {
-                 var success = document.execCommand('copy');
-                 if (success) {
-                     button.textContent = '\u2714';
-                     setTimeout(function () {
-                         button.textContent = '\uD83D\uDCCE'; // restore the "copy" button
-                     }, 2000);
-                 } else {
-                     button.textContent = '\u2716';
-                 }
-             } catch (e) {
-                 console.error(e);
-                 button.textContent = '\u2716';
-             }
-
-             selection.removeAllRanges();
-         });
-         code.insertBefore(button, firstChild); // insert the button before the first child element
-     }
-
-     function handleNewElements(mutationsList, observer) {
-         for (var mutation of mutationsList) {
-             if (mutation.type === 'childList') {
-                 for (var node of mutation.addedNodes) {
-                     if (node.nodeName === 'PRE') {
-                         addCopyButton(node);
-                     }
-                 }
-             }
-         }
-     }
-
-     var observer = new MutationObserver(handleNewElements);
-     observer.observe(document.documentElement, { childList: true, subtree: true });
-
-     document.querySelectorAll('pre').forEach(addCopyButton);
- })();
 
assets/favicon.png DELETED
Binary file (560 kB)
 
chatgpt - macOS.command DELETED
@@ -1,7 +0,0 @@
- #!/bin/bash
- echo Opening ChuanhuChatGPT...
- cd "$(dirname "${BASH_SOURCE[0]}")"
- nohup python3 ChuanhuChatbot.py >/dev/null 2>&1 &
- sleep 5
- open http://127.0.0.1:7860
- echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). If you kill ChuanhuChatbot, Use "pkill -f 'ChuanhuChatbot'" command in terminal.
 
chatgpt - windows.bat DELETED
@@ -1,14 +0,0 @@
- @echo off
- echo Opening ChuanhuChatGPT...
-
- REM Open powershell via bat
- start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
-
- REM The web page can be accessed with delayed start http://127.0.0.1:7860/
- ping -n 5 127.0.0.1>nul
-
- REM access ChatGPT via your default browser
- start "" "http://127.0.0.1:7860/"
-
-
- echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/).
 
config.json DELETED
@@ -1,3 +0,0 @@
- {
-     "hide_history_when_not_logged_in": true
- }
 
custom.css DELETED
@@ -1,162 +0,0 @@
- :root {
-     --chatbot-color-light: #F3F3F3;
-     --chatbot-color-dark: #121111;
- }
-
- /* status_display */
- #status_display {
-     display: flex;
-     min-height: 2.5em;
-     align-items: flex-end;
-     justify-content: flex-end;
- }
- #status_display p {
-     font-size: .85em;
-     font-family: monospace;
-     color: var(--body-text-color-subdued);
- }
-
- #chuanhu_chatbot, #status_display {
-     transition: all 0.6s;
- }
- /* list */
- ol:not(.options), ul:not(.options) {
-     padding-inline-start: 2em !important;
- }
-
- /* light theme */
- #chuanhu_chatbot {
-     background-color: var(--chatbot-color-light) !important;
- }
- [data-testid = "bot"] {
-     background-color: #FFFFFF !important;
- }
- [data-testid = "user"] {
-     background-color: #95EC69 !important;
- }
- /* chat bubbles */
- [class *= "message"] {
-     border-radius: var(--radius-xl) !important;
-     border: none;
-     padding: var(--spacing-xl) !important;
-     font-size: var(--text-md) !important;
-     line-height: var(--line-md) !important;
-     min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
-     min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
- }
- [data-testid = "bot"] {
-     max-width: 85%;
-     border-bottom-left-radius: 0 !important;
- }
- [data-testid = "user"] {
-     max-width: 85%;
-     width: auto !important;
-     border-bottom-right-radius: 0 !important;
- }
- /* tables */
- table {
-     margin: 1em 0;
-     border-collapse: collapse;
-     empty-cells: show;
- }
- td,th {
-     border: 1.2px solid var(--border-color-primary) !important;
-     padding: 0.2em;
- }
- thead {
-     background-color: rgba(175,184,193,0.2);
- }
- thead th {
-     padding: .5em .2em;
- }
- /* inline code */
- code {
-     display: inline;
-     white-space: break-spaces;
-     border-radius: 6px;
-     margin: 0 2px 0 2px;
-     padding: .2em .4em .1em .4em;
-     background-color: rgba(175,184,193,0.2);
- }
- /* code blocks */
- pre code {
-     display: block;
-     overflow: auto;
-     white-space: pre;
-     background-color: hsla(0, 0%, 0%, 80%)!important;
-     border-radius: 10px;
-     padding: 1.4em 1.2em 0em 1.4em;
-     margin: 1.2em 2em 1.2em 0.5em;
-     color: #FFF;
-     box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
- }
- /* code highlighting styles */
- .highlight .hll { background-color: #49483e }
- .highlight .c { color: #75715e } /* Comment */
- .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
- .highlight .k { color: #66d9ef } /* Keyword */
- .highlight .l { color: #ae81ff } /* Literal */
- .highlight .n { color: #f8f8f2 } /* Name */
- .highlight .o { color: #f92672 } /* Operator */
- .highlight .p { color: #f8f8f2 } /* Punctuation */
- .highlight .ch { color: #75715e } /* Comment.Hashbang */
- .highlight .cm { color: #75715e } /* Comment.Multiline */
- .highlight .cp { color: #75715e } /* Comment.Preproc */
- .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
- .highlight .c1 { color: #75715e } /* Comment.Single */
- .highlight .cs { color: #75715e } /* Comment.Special */
- .highlight .gd { color: #f92672 } /* Generic.Deleted */
- .highlight .ge { font-style: italic } /* Generic.Emph */
- .highlight .gi { color: #a6e22e } /* Generic.Inserted */
- .highlight .gs { font-weight: bold } /* Generic.Strong */
- .highlight .gu { color: #75715e } /* Generic.Subheading */
- .highlight .kc { color: #66d9ef } /* Keyword.Constant */
- .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
- .highlight .kn { color: #f92672 } /* Keyword.Namespace */
- .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
- .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
- .highlight .kt { color: #66d9ef } /* Keyword.Type */
- .highlight .ld { color: #e6db74 } /* Literal.Date */
- .highlight .m { color: #ae81ff } /* Literal.Number */
- .highlight .s { color: #e6db74 } /* Literal.String */
- .highlight .na { color: #a6e22e } /* Name.Attribute */
- .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
- .highlight .nc { color: #a6e22e } /* Name.Class */
- .highlight .no { color: #66d9ef } /* Name.Constant */
- .highlight .nd { color: #a6e22e } /* Name.Decorator */
- .highlight .ni { color: #f8f8f2 } /* Name.Entity */
- .highlight .ne { color: #a6e22e } /* Name.Exception */
- .highlight .nf { color: #a6e22e } /* Name.Function */
- .highlight .nl { color: #f8f8f2 } /* Name.Label */
- .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
- .highlight .nx { color: #a6e22e } /* Name.Other */
- .highlight .py { color: #f8f8f2 } /* Name.Property */
- .highlight .nt { color: #f92672 } /* Name.Tag */
- .highlight .nv { color: #f8f8f2 } /* Name.Variable */
- .highlight .ow { color: #f92672 } /* Operator.Word */
- .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
- .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
- .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
- .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
- .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
- .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
- .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
- .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
- .highlight .sc { color: #e6db74 } /* Literal.String.Char */
- .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
- .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
- .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
- .highlight .se { color: #ae81ff } /* Literal.String.Escape */
- .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
- .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
- .highlight .sx { color: #e6db74 } /* Literal.String.Other */
- .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
- .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
- .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
- .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
- .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
- .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
- .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
- .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
- .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
- .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
 
history/2023-06-14_15-05-04.json DELETED
File without changes
modules/base_model.py DELETED
@@ -1,561 +0,0 @@
1
- from __future__ import annotations
2
- from typing import TYPE_CHECKING, List
3
-
4
- import logging
5
- import json
6
- import commentjson as cjson
7
- import os
8
- import sys
9
- import requests
10
- import urllib3
11
- import traceback
12
-
13
- from tqdm import tqdm
14
- import colorama
15
- from duckduckgo_search import ddg
16
- import asyncio
17
- import aiohttp
18
- from enum import Enum
19
-
20
- from .presets import *
21
- from .llama_func import *
22
- from .utils import *
23
- from . import shared
24
- from .config import retrieve_proxy
25
-
26
-
27
- class ModelType(Enum):
28
- Unknown = -1
29
- OpenAI = 0
30
- ChatGLM = 1
31
- LLaMA = 2
32
- XMChat = 3
33
-
34
- @classmethod
35
- def get_type(cls, model_name: str):
36
- model_type = None
37
- model_name_lower = model_name.lower()
38
- if "gpt" in model_name_lower:
39
- model_type = ModelType.OpenAI
40
- elif "chatglm" in model_name_lower:
41
- model_type = ModelType.ChatGLM
42
- elif "llama" in model_name_lower or "alpaca" in model_name_lower:
43
- model_type = ModelType.LLaMA
44
- elif "xmchat" in model_name_lower:
45
- model_type = ModelType.XMChat
46
- else:
47
- model_type = ModelType.Unknown
48
- return model_type
49
-
50
-
51
- class BaseLLMModel:
52
- def __init__(
53
- self,
54
- model_name,
55
- system_prompt="",
56
- temperature=1.0,
57
- top_p=1.0,
58
- n_choices=1,
59
- stop=None,
60
- max_generation_token=None,
61
- presence_penalty=0,
62
- frequency_penalty=0,
63
- logit_bias=None,
64
- user="",
65
- ) -> None:
66
- self.history = []
67
- self.all_token_counts = []
68
- self.model_name = model_name
69
- self.model_type = ModelType.get_type(model_name)
70
- try:
71
- self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
72
- except KeyError:
73
- self.token_upper_limit = DEFAULT_TOKEN_LIMIT
74
- self.interrupted = False
75
- self.system_prompt = system_prompt
76
- self.api_key = None
77
- self.need_api_key = False
78
- self.single_turn = False
79
-
80
- self.temperature = temperature
81
- self.top_p = top_p
82
- self.n_choices = n_choices
83
- self.stop_sequence = stop
84
- self.max_generation_token = None
85
- self.presence_penalty = presence_penalty
86
- self.frequency_penalty = frequency_penalty
87
- self.logit_bias = logit_bias
88
- self.user_identifier = user
89
-
90
- def get_answer_stream_iter(self):
91
- """stream predict, need to be implemented
92
- conversations are stored in self.history, with the most recent question, in OpenAI format
93
- should return a generator, each time give the next word (str) in the answer
94
- """
95
- logging.warning("stream predict not implemented, using at once predict instead")
96
- response, _ = self.get_answer_at_once()
97
- yield response
98
-
99
- def get_answer_at_once(self):
100
- """predict at once, need to be implemented
101
- conversations are stored in self.history, with the most recent question, in OpenAI format
102
- Should return:
103
- the answer (str)
104
- total token count (int)
105
- """
106
- logging.warning("at once predict not implemented, using stream predict instead")
107
- response_iter = self.get_answer_stream_iter()
108
- count = 0
109
- for response in response_iter:
110
- count += 1
111
- return response, sum(self.all_token_counts) + count
112
-
113
- def billing_info(self):
114
- """get billing infomation, inplement if needed"""
115
- logging.warning("billing info not implemented, using default")
116
- return BILLING_NOT_APPLICABLE_MSG
117
-
118
- def count_token(self, user_input):
119
- """get token count from input, implement if needed"""
120
- logging.warning("token count not implemented, using default")
121
- return len(user_input)
122
-
123
- def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
124
- def get_return_value():
125
- return chatbot, status_text
126
-
127
- status_text = i18n("开始实时传输回答……")
128
- if fake_input:
129
- chatbot.append((fake_input, ""))
130
- else:
131
- chatbot.append((inputs, ""))
132
-
133
- user_token_count = self.count_token(inputs)
134
- self.all_token_counts.append(user_token_count)
135
- logging.debug(f"输入token计数: {user_token_count}")
136
-
137
- stream_iter = self.get_answer_stream_iter()
138
-
139
- for partial_text in stream_iter:
140
- chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
141
- self.all_token_counts[-1] += 1
142
- status_text = self.token_message()
143
- yield get_return_value()
144
- if self.interrupted:
145
- self.recover()
146
- break
147
- self.history.append(construct_assistant(partial_text))
148
-
149
- def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
150
- if fake_input:
151
- chatbot.append((fake_input, ""))
152
- else:
153
- chatbot.append((inputs, ""))
154
- if fake_input is not None:
155
- user_token_count = self.count_token(fake_input)
156
- else:
157
- user_token_count = self.count_token(inputs)
158
- self.all_token_counts.append(user_token_count)
159
- ai_reply, total_token_count = self.get_answer_at_once()
160
- self.history.append(construct_assistant(ai_reply))
161
- if fake_input is not None:
162
- self.history[-2] = construct_user(fake_input)
163
- chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
164
- if fake_input is not None:
165
- self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
166
- else:
167
- self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
168
- status_text = self.token_message()
169
- return chatbot, status_text
170
-
171
- def handle_file_upload(self, files, chatbot):
172
- """if the model accepts multi modal input, implement this function"""
173
- status = gr.Markdown.update()
174
- if files:
175
- construct_index(self.api_key, file_src=files)
176
- status = "索引构建完成"
177
- return gr.Files.update(), chatbot, status
178
-
179
- def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
180
- fake_inputs = None
181
- display_append = []
182
- limited_context = False
183
- fake_inputs = real_inputs
184
- if files:
185
- from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
186
- from llama_index.indices.query.schema import QueryBundle
187
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
188
- from langchain.chat_models import ChatOpenAI
189
- from llama_index import (
190
- GPTSimpleVectorIndex,
191
- ServiceContext,
192
- LangchainEmbedding,
193
- OpenAIEmbedding,
194
- )
195
- limited_context = True
196
- msg = "加载索引中……"
197
- logging.info(msg)
198
- # yield chatbot + [(inputs, "")], msg
199
- index = construct_index(self.api_key, file_src=files)
200
- assert index is not None, "获取索引失败"
201
- msg = "索引获取成功,生成回答中……"
202
- logging.info(msg)
203
- if local_embedding or self.model_type != ModelType.OpenAI:
204
- embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
205
- else:
206
- embed_model = OpenAIEmbedding()
207
- # yield chatbot + [(inputs, "")], msg
208
- with retrieve_proxy():
209
- prompt_helper = PromptHelper(
210
- max_input_size=4096,
211
- num_output=5,
212
- max_chunk_overlap=20,
213
- chunk_size_limit=600,
214
- )
215
- from llama_index import ServiceContext
216
-
217
- service_context = ServiceContext.from_defaults(
218
- prompt_helper=prompt_helper, embed_model=embed_model
219
- )
220
- query_object = GPTVectorStoreIndexQuery(
221
- index.index_struct,
222
- service_context=service_context,
223
- similarity_top_k=5,
224
- vector_store=index._vector_store,
225
- docstore=index._docstore,
226
- )
227
- query_bundle = QueryBundle(real_inputs)
228
- nodes = query_object.retrieve(query_bundle)
229
- reference_results = [n.node.text for n in nodes]
230
- reference_results = add_source_numbers(reference_results, use_source=False)
231
- display_append = add_details(reference_results)
232
- display_append = "\n\n" + "".join(display_append)
233
- real_inputs = (
234
- replace_today(PROMPT_TEMPLATE)
235
- .replace("{query_str}", real_inputs)
236
- .replace("{context_str}", "\n\n".join(reference_results))
237
- .replace("{reply_language}", reply_language)
238
- )
239
- elif use_websearch:
240
- limited_context = True
241
- search_results = ddg(real_inputs, max_results=5)
242
- reference_results = []
243
- for idx, result in enumerate(search_results):
244
- logging.debug(f"搜索结果{idx + 1}:{result}")
245
- domain_name = urllib3.util.parse_url(result["href"]).host
246
- reference_results.append([result["body"], result["href"]])
247
- display_append.append(
248
- # f"{idx+1}. [{domain_name}]({result['href']})\n"
249
- f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
250
- )
251
- reference_results = add_source_numbers(reference_results)
252
- display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
253
- real_inputs = (
254
- replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
255
- .replace("{query}", real_inputs)
256
- .replace("{web_results}", "\n\n".join(reference_results))
257
- .replace("{reply_language}", reply_language)
258
- )
259
- else:
260
- display_append = ""
261
- return limited_context, fake_inputs, display_append, real_inputs, chatbot
262
-
263
- def predict(
264
- self,
265
- inputs,
266
- chatbot,
267
- stream=False,
268
- use_websearch=False,
269
- files=None,
270
- reply_language="中文",
271
- should_check_token_count=True,
272
- ): # repetition_penalty, top_k
273
-
274
- status_text = "开始生成回答……"
275
- logging.info(
276
- "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
277
- )
278
- if should_check_token_count:
279
- yield chatbot + [(inputs, "")], status_text
280
- if reply_language == "跟随问题语言(不稳定)":
281
- reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
282
-
283
- limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
284
- yield chatbot + [(fake_inputs, "")], status_text
285
-
286
- if (
287
- self.need_api_key and
288
- self.api_key is None
289
- and not shared.state.multi_api_key
290
- ):
291
- status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
292
- logging.info(status_text)
293
- chatbot.append((inputs, ""))
294
- if len(self.history) == 0:
295
- self.history.append(construct_user(inputs))
296
- self.history.append("")
297
- self.all_token_counts.append(0)
298
- else:
299
- self.history[-2] = construct_user(inputs)
300
- yield chatbot + [(inputs, "")], status_text
301
- return
302
- elif len(inputs.strip()) == 0:
303
- status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
304
- logging.info(status_text)
305
- yield chatbot + [(inputs, "")], status_text
306
- return
307
-
308
- if self.single_turn:
309
- self.history = []
310
- self.all_token_counts = []
311
- self.history.append(construct_user(inputs))
312
-
313
- try:
314
- if stream:
315
- logging.debug("使用流式传输")
316
- iter = self.stream_next_chatbot(
317
- inputs,
318
- chatbot,
319
- fake_input=fake_inputs,
320
- display_append=display_append,
321
- )
322
- for chatbot, status_text in iter:
323
- yield chatbot, status_text
324
- else:
325
- logging.debug("不使用流式传输")
326
- chatbot, status_text = self.next_chatbot_at_once(
327
- inputs,
328
- chatbot,
329
- fake_input=fake_inputs,
330
- display_append=display_append,
331
- )
332
- yield chatbot, status_text
333
- except Exception as e:
334
- traceback.print_exc()
335
- status_text = STANDARD_ERROR_MSG + str(e)
336
- yield chatbot, status_text
337
-
338
- if len(self.history) > 1 and self.history[-1]["content"] != inputs:
339
- logging.info(
340
- "回答为:"
341
- + colorama.Fore.BLUE
342
- + f"{self.history[-1]['content']}"
343
- + colorama.Style.RESET_ALL
344
- )
345
-
346
- if limited_context:
347
- # self.history = self.history[-4:]
348
- # self.all_token_counts = self.all_token_counts[-2:]
349
- self.history = []
350
- self.all_token_counts = []
351
-
352
- max_token = self.token_upper_limit - TOKEN_OFFSET
353
-
354
- if sum(self.all_token_counts) > max_token and should_check_token_count:
355
- count = 0
356
- while (
357
- sum(self.all_token_counts)
358
- > self.token_upper_limit * REDUCE_TOKEN_FACTOR
359
- and sum(self.all_token_counts) > 0
360
- ):
361
- count += 1
362
- del self.all_token_counts[0]
363
- del self.history[:2]
364
- logging.info(status_text)
365
- status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
366
- yield chatbot, status_text
367
-
368
- def retry(
369
- self,
370
- chatbot,
371
- stream=False,
372
- use_websearch=False,
373
- files=None,
374
- reply_language="中文",
375
- ):
376
- logging.debug("重试中……")
377
- if len(self.history) > 0:
378
- inputs = self.history[-2]["content"]
379
- del self.history[-2:]
380
- self.all_token_counts.pop()
381
- elif len(chatbot) > 0:
382
- inputs = chatbot[-1][0]
383
- else:
384
- yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
385
- return
386
-
387
- iter = self.predict(
388
- inputs,
389
- chatbot,
390
- stream=stream,
391
- use_websearch=use_websearch,
392
- files=files,
393
- reply_language=reply_language,
394
- )
395
- for x in iter:
396
- yield x
397
- logging.debug("重试完毕")
398
-
399
- # def reduce_token_size(self, chatbot):
400
- # logging.info("开始减少token数量……")
401
- # chatbot, status_text = self.next_chatbot_at_once(
402
- # summarize_prompt,
403
- # chatbot
404
- # )
405
- # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
406
- # num_chat = find_n(self.all_token_counts, max_token_count)
407
- # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
408
- # chatbot = chatbot[:-1]
409
- # self.history = self.history[-2*num_chat:] if num_chat > 0 else []
410
- # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
411
- # msg = f"保留了最近{num_chat}轮对话"
412
- # logging.info(msg)
413
- # logging.info("减少token数量完毕")
414
- # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
415
-
416
- def interrupt(self):
417
- self.interrupted = True
418
-
419
- def recover(self):
420
- self.interrupted = False
421
-
422
- def set_token_upper_limit(self, new_upper_limit):
423
- self.token_upper_limit = new_upper_limit
424
- print(f"token上限设置为{new_upper_limit}")
425
-
426
- def set_temperature(self, new_temperature):
427
- self.temperature = new_temperature
428
-
429
- def set_top_p(self, new_top_p):
430
- self.top_p = new_top_p
431
-
432
- def set_n_choices(self, new_n_choices):
433
- self.n_choices = new_n_choices
434
-
435
- def set_stop_sequence(self, new_stop_sequence: str):
436
- new_stop_sequence = new_stop_sequence.split(",")
437
- self.stop_sequence = new_stop_sequence
438
-
439
- def set_max_tokens(self, new_max_tokens):
440
- self.max_generation_token = new_max_tokens
441
-
442
- def set_presence_penalty(self, new_presence_penalty):
443
- self.presence_penalty = new_presence_penalty
444
-
445
- def set_frequency_penalty(self, new_frequency_penalty):
446
- self.frequency_penalty = new_frequency_penalty
447
-
448
- def set_logit_bias(self, logit_bias):
449
- logit_bias = logit_bias.split()
450
- bias_map = {}
451
- encoding = tiktoken.get_encoding("cl100k_base")
452
- for line in logit_bias:
453
- word, bias_amount = line.split(":")
454
- if word:
455
- for token in encoding.encode(word):
456
- bias_map[token] = float(bias_amount)
457
- self.logit_bias = bias_map
458
-
459
- def set_user_identifier(self, new_user_identifier):
460
- self.user_identifier = new_user_identifier
461
-
462
- def set_system_prompt(self, new_system_prompt):
463
- self.system_prompt = new_system_prompt
464
-
465
- def set_key(self, new_access_key):
466
- self.api_key = new_access_key.strip()
467
- msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
468
- logging.info(msg)
469
- return self.api_key, msg
470
-
471
- def set_single_turn(self, new_single_turn):
472
- self.single_turn = new_single_turn
473
-
474
- def reset(self):
475
- self.history = []
476
- self.all_token_counts = []
477
- self.interrupted = False
478
- return [], self.token_message([0])
479
-
480
- def delete_first_conversation(self):
481
- if self.history:
482
- del self.history[:2]
483
- del self.all_token_counts[0]
484
- return self.token_message()
485
-
486
- def delete_last_conversation(self, chatbot):
487
- if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
488
- msg = "由于包含报错信息,只删除chatbot记录"
489
- chatbot.pop()
490
- return chatbot, self.history
491
- if len(self.history) > 0:
492
- self.history.pop()
493
- self.history.pop()
494
- if len(chatbot) > 0:
495
- msg = "删除了一组chatbot对话"
496
- chatbot.pop()
497
- if len(self.all_token_counts) > 0:
498
- msg = "删除了一组对话的token计数记录"
499
- self.all_token_counts.pop()
500
- msg = "删除了一组对话"
501
- return chatbot, msg
502
-
503
- def token_message(self, token_lst=None):
504
- if token_lst is None:
505
- token_lst = self.all_token_counts
506
- token_sum = 0
507
- for i in range(len(token_lst)):
508
- token_sum += sum(token_lst[: i + 1])
509
- return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
510
-
511
- def save_chat_history(self, filename, chatbot, user_name):
512
- if filename == "":
513
- return
514
- if not filename.endswith(".json"):
515
- filename += ".json"
516
- return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
517
-
518
- def export_markdown(self, filename, chatbot, user_name):
519
- if filename == "":
520
- return
521
- if not filename.endswith(".md"):
522
- filename += ".md"
523
- return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
524
-
525
- def load_chat_history(self, filename, chatbot, user_name):
526
- logging.debug(f"{user_name} 加载对话历史中……")
527
- if type(filename) != str:
528
- filename = filename.name
529
- try:
530
- with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f:
531
- json_s = json.load(f)
532
- try:
533
- if type(json_s["history"][0]) == str:
534
- logging.info("历史记录格式为旧版,正在转换……")
535
- new_history = []
536
- for index, item in enumerate(json_s["history"]):
537
- if index % 2 == 0:
538
- new_history.append(construct_user(item))
539
- else:
540
- new_history.append(construct_assistant(item))
541
- json_s["history"] = new_history
542
- logging.info(new_history)
543
- except:
544
- # 没有对话历史
545
- pass
546
- logging.debug(f"{user_name} 加载对话历史完毕")
547
- self.history = json_s["history"]
548
- return filename, json_s["system"], json_s["chatbot"]
549
- except FileNotFoundError:
550
- logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
551
- return filename, self.system_prompt, chatbot
552
-
553
- def like(self):
554
- """like the last response, implement if needed
555
- """
556
- return gr.update()
557
-
558
- def dislike(self):
559
- """dislike the last response, implement if needed
560
- """
561
- return gr.update()
 
modules/llama_func.py DELETED
@@ -1,166 +0,0 @@
1
- import os
2
- import logging
3
-
4
- from llama_index import download_loader
5
- from llama_index import (
6
- Document,
7
- LLMPredictor,
8
- PromptHelper,
9
- QuestionAnswerPrompt,
10
- RefinePrompt,
11
- )
12
- import colorama
13
- import PyPDF2
14
- from tqdm import tqdm
15
-
16
- from modules.presets import *
17
- from modules.utils import *
18
- from modules.config import local_embedding
19
-
20
-
21
- def get_index_name(file_src):
22
- file_paths = [x.name for x in file_src]
23
- file_paths.sort(key=lambda x: os.path.basename(x))
24
-
25
- md5_hash = hashlib.md5()
26
- for file_path in file_paths:
27
- with open(file_path, "rb") as f:
28
- while chunk := f.read(8192):
29
- md5_hash.update(chunk)
30
-
31
- return md5_hash.hexdigest()
32
-
33
-
34
- def block_split(text):
35
- blocks = []
36
- while len(text) > 0:
37
- blocks.append(Document(text[:1000]))
38
- text = text[1000:]
39
- return blocks
40
-
41
-
42
- def get_documents(file_src):
43
- documents = []
44
- logging.debug("Loading documents...")
45
- logging.debug(f"file_src: {file_src}")
46
- for file in file_src:
47
- filepath = file.name
48
- filename = os.path.basename(filepath)
49
- file_type = os.path.splitext(filepath)[1]
50
- logging.info(f"loading file: {filename}")
51
- try:
52
- if file_type == ".pdf":
53
- logging.debug("Loading PDF...")
54
- try:
55
- from modules.pdf_func import parse_pdf
56
- from modules.config import advance_docs
57
-
58
- two_column = advance_docs["pdf"].get("two_column", False)
59
- pdftext = parse_pdf(filepath, two_column).text
60
- except:
61
- pdftext = ""
62
- with open(filepath, "rb") as pdfFileObj:
63
- pdfReader = PyPDF2.PdfReader(pdfFileObj)
64
- for page in tqdm(pdfReader.pages):
65
- pdftext += page.extract_text()
66
- text_raw = pdftext
67
- elif file_type == ".docx":
68
- logging.debug("Loading Word...")
69
- DocxReader = download_loader("DocxReader")
70
- loader = DocxReader()
71
- text_raw = loader.load_data(file=filepath)[0].text
72
- elif file_type == ".epub":
73
- logging.debug("Loading EPUB...")
74
- EpubReader = download_loader("EpubReader")
75
- loader = EpubReader()
76
- text_raw = loader.load_data(file=filepath)[0].text
77
- elif file_type == ".xlsx":
78
- logging.debug("Loading Excel...")
79
- text_list = excel_to_string(filepath)
80
- for elem in text_list:
81
- documents.append(Document(elem))
82
- continue
83
- else:
84
- logging.debug("Loading text file...")
85
- with open(filepath, "r", encoding="utf-8") as f:
86
- text_raw = f.read()
87
- except Exception as e:
88
- logging.error(f"Error loading file: {filename}")
89
- pass
90
- text = add_space(text_raw)
91
- # text = block_split(text)
92
- # documents += text
93
- documents += [Document(text)]
94
- logging.debug("Documents loaded.")
95
- return documents
96
-
97
-
98
- def construct_index(
99
- api_key,
100
- file_src,
101
- max_input_size=4096,
102
- num_outputs=5,
103
- max_chunk_overlap=20,
104
- chunk_size_limit=600,
105
- embedding_limit=None,
106
- separator=" ",
107
- ):
108
- from langchain.chat_models import ChatOpenAI
109
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
110
- from llama_index import GPTSimpleVectorIndex, ServiceContext, LangchainEmbedding, OpenAIEmbedding
111
-
112
- if api_key:
113
- os.environ["OPENAI_API_KEY"] = api_key
114
- else:
115
- # 由于一个依赖的愚蠢的设计,这里必须要有一个API KEY
116
- os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
117
- chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
118
- embedding_limit = None if embedding_limit == 0 else embedding_limit
119
- separator = " " if separator == "" else separator
120
-
121
- prompt_helper = PromptHelper(
122
- max_input_size=max_input_size,
123
- num_output=num_outputs,
124
- max_chunk_overlap=max_chunk_overlap,
125
- embedding_limit=embedding_limit,
126
- chunk_size_limit=600,
127
- separator=separator,
128
- )
129
- index_name = get_index_name(file_src)
130
- if os.path.exists(f"./index/{index_name}.json"):
131
- logging.info("找到了缓存的索引文件,加载中……")
132
- return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
133
- else:
134
- try:
135
- documents = get_documents(file_src)
136
- if local_embedding:
137
- embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
138
- else:
139
- embed_model = OpenAIEmbedding()
140
- logging.info("构建索引中……")
141
- with retrieve_proxy():
142
- service_context = ServiceContext.from_defaults(
143
- prompt_helper=prompt_helper,
144
- chunk_size_limit=chunk_size_limit,
145
- embed_model=embed_model,
146
- )
147
- index = GPTSimpleVectorIndex.from_documents(
148
- documents, service_context=service_context
149
- )
150
- logging.debug("索引构建完成!")
151
- os.makedirs("./index", exist_ok=True)
152
- index.save_to_disk(f"./index/{index_name}.json")
153
- logging.debug("索引已保存至本地!")
154
- return index
155
-
156
- except Exception as e:
157
- logging.error("索引构建失败!", e)
158
- print(e)
159
- return None
160
-
161
-
162
- def add_space(text):
163
- punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "}
164
- for cn_punc, en_punc in punctuations.items():
165
- text = text.replace(cn_punc, en_punc)
166
- return text
 
modules/models.py DELETED
@@ -1,625 +0,0 @@
1
- from __future__ import annotations
2
- from typing import TYPE_CHECKING, List
3
-
4
- import logging
5
- import json
6
- import commentjson as cjson
7
- import os
8
- import sys
9
- import requests
10
- import urllib3
11
- import platform
12
- import base64
13
- from io import BytesIO
14
- from PIL import Image
15
-
16
- from tqdm import tqdm
17
- import colorama
18
- from duckduckgo_search import ddg
19
- import asyncio
20
- import aiohttp
21
- from enum import Enum
22
- import uuid
23
-
24
- from .presets import *
25
- from .llama_func import *
26
- from .utils import *
27
- from . import shared
28
- from .config import retrieve_proxy
29
- from modules import config
30
- from .base_model import BaseLLMModel, ModelType
31
-
32
-
33
- class OpenAIClient(BaseLLMModel):
34
- def __init__(
35
- self,
36
- model_name,
37
- api_key,
38
- system_prompt=INITIAL_SYSTEM_PROMPT,
39
- temperature=1.0,
40
- top_p=1.0,
41
- ) -> None:
42
- super().__init__(
43
- model_name=model_name,
44
- temperature=temperature,
45
- top_p=top_p,
46
- system_prompt=system_prompt,
47
- )
48
- self.api_key = api_key
49
- self.need_api_key = True
50
- self._refresh_header()
51
-
52
- def get_answer_stream_iter(self):
53
- response = self._get_response(stream=True)
54
- if response is not None:
55
- iter = self._decode_chat_response(response)
56
- partial_text = ""
57
- for i in iter:
58
- partial_text += i
59
- yield partial_text
60
- else:
61
- yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG
62
-
63
- def get_answer_at_once(self):
64
- response = self._get_response()
65
- response = json.loads(response.text)
66
- content = response["choices"][0]["message"]["content"]
67
- total_token_count = response["usage"]["total_tokens"]
68
- return content, total_token_count
69
-
70
- def count_token(self, user_input):
71
- input_token_count = count_token(construct_user(user_input))
72
- if self.system_prompt is not None and len(self.all_token_counts) == 0:
73
- system_prompt_token_count = count_token(
74
- construct_system(self.system_prompt)
75
- )
76
- return input_token_count + system_prompt_token_count
77
- return input_token_count
78
-
79
- def billing_info(self):
80
- try:
81
- curr_time = datetime.datetime.now()
82
- last_day_of_month = get_last_day_of_month(
83
- curr_time).strftime("%Y-%m-%d")
84
- first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d")
85
- usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}"
86
- try:
87
- usage_data = self._get_billing_data(usage_url)
88
- except Exception as e:
89
- logging.error(f"获取API使用情况失败:" + str(e))
90
- return i18n("**获取API使用情况失败**")
91
- rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
92
- return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
93
- except requests.exceptions.ConnectTimeout:
94
- status_text = (
95
- STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
96
- )
97
- return status_text
98
- except requests.exceptions.ReadTimeout:
99
- status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
100
- return status_text
101
- except Exception as e:
102
- import traceback
103
- traceback.print_exc()
104
- logging.error(i18n("获取API使用情况失败:") + str(e))
105
- return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG
106
-
107
- def set_token_upper_limit(self, new_upper_limit):
108
- pass
109
-
110
- @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用
111
- def _get_response(self, stream=False):
112
- openai_api_key = self.api_key
113
- system_prompt = self.system_prompt
114
- history = self.history
115
- logging.debug(colorama.Fore.YELLOW +
116
- f"{history}" + colorama.Fore.RESET)
117
- headers = {
118
- "Content-Type": "application/json",
119
- "Authorization": f"Bearer {openai_api_key}",
120
- }
121
-
122
- if system_prompt is not None:
123
- history = [construct_system(system_prompt), *history]
124
-
125
- payload = {
126
- "model": self.model_name,
127
- "messages": history,
128
- "temperature": self.temperature,
129
- "top_p": self.top_p,
130
- "n": self.n_choices,
131
- "stream": stream,
132
- "presence_penalty": self.presence_penalty,
133
- "frequency_penalty": self.frequency_penalty,
134
- }
135
-
136
- if self.max_generation_token is not None:
137
- payload["max_tokens"] = self.max_generation_token
138
- if self.stop_sequence is not None:
139
- payload["stop"] = self.stop_sequence
140
- if self.logit_bias is not None:
141
- payload["logit_bias"] = self.logit_bias
142
- if self.user_identifier is not None:
143
- payload["user"] = self.user_identifier
144
-
145
- if stream:
146
- timeout = TIMEOUT_STREAMING
147
- else:
148
- timeout = TIMEOUT_ALL
149
-
150
- # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求
151
- if shared.state.completion_url != COMPLETION_URL:
152
- logging.info(f"使用自定义API URL: {shared.state.completion_url}")
153
-
154
- with retrieve_proxy():
155
- try:
156
- response = requests.post(
157
- shared.state.completion_url,
158
- headers=headers,
159
- json=payload,
160
- stream=stream,
161
- timeout=timeout,
162
- )
163
- except:
164
- return None
165
- return response
166
-
167
- def _refresh_header(self):
168
- self.headers = {
169
- "Content-Type": "application/json",
170
- "Authorization": f"Bearer {self.api_key}",
171
- }
172
-
173
- def _get_billing_data(self, billing_url):
174
- with retrieve_proxy():
175
- response = requests.get(
176
- billing_url,
177
- headers=self.headers,
178
- timeout=TIMEOUT_ALL,
179
- )
180
-
181
- if response.status_code == 200:
182
- data = response.json()
183
- return data
184
- else:
185
- raise Exception(
186
- f"API request failed with status code {response.status_code}: {response.text}"
187
- )
188
-
189
- def _decode_chat_response(self, response):
190
- error_msg = ""
191
- for chunk in response.iter_lines():
192
- if chunk:
193
- chunk = chunk.decode()
194
- chunk_length = len(chunk)
195
- try:
196
- chunk = json.loads(chunk[6:])
197
- except json.JSONDecodeError:
198
- print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}")
199
- error_msg += chunk
200
- continue
201
- if chunk_length > 6 and "delta" in chunk["choices"][0]:
202
- if chunk["choices"][0]["finish_reason"] == "stop":
203
- break
204
- try:
205
- yield chunk["choices"][0]["delta"]["content"]
206
- except Exception as e:
207
- # logging.error(f"Error: {e}")
208
- continue
209
- if error_msg:
210
- raise Exception(error_msg)
211
-
212
- def set_key(self, new_access_key):
213
- ret = super().set_key(new_access_key)
214
- self._refresh_header()
215
- return ret
216
-
217
-
218
- class ChatGLM_Client(BaseLLMModel):
219
- def __init__(self, model_name) -> None:
220
- super().__init__(model_name=model_name)
221
- from transformers import AutoTokenizer, AutoModel
222
- import torch
223
- global CHATGLM_TOKENIZER, CHATGLM_MODEL
224
- if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:
225
- system_name = platform.system()
226
- model_path = None
227
- if os.path.exists("models"):
228
- model_dirs = os.listdir("models")
229
- if model_name in model_dirs:
230
- model_path = f"models/{model_name}"
231
- if model_path is not None:
232
- model_source = model_path
233
- else:
234
- model_source = f"THUDM/{model_name}"
235
- CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained(
236
- model_source, trust_remote_code=True
237
- )
238
- quantified = False
239
- if "int4" in model_name:
240
- quantified = True
241
- model = AutoModel.from_pretrained(
242
- model_source, trust_remote_code=True
243
- )
244
- if torch.cuda.is_available():
245
- # run on CUDA
246
- logging.info("CUDA is available, using CUDA")
247
- model = model.half().cuda()
248
- # mps加速还存在一些问题,暂时不使用
249
- elif system_name == "Darwin" and model_path is not None and not quantified:
250
- logging.info("Running on macOS, using MPS")
251
- # running on macOS and model already downloaded
252
- model = model.half().to("mps")
253
- else:
254
- logging.info("GPU is not available, using CPU")
255
- model = model.float()
256
- model = model.eval()
257
- CHATGLM_MODEL = model
258
-
259
- def _get_glm_style_input(self):
260
- history = [x["content"] for x in self.history]
261
- query = history.pop()
262
- logging.debug(colorama.Fore.YELLOW +
263
- f"{history}" + colorama.Fore.RESET)
264
- assert (
265
- len(history) % 2 == 0
266
- ), f"History should be even length. current history is: {history}"
267
- history = [[history[i], history[i + 1]]
268
- for i in range(0, len(history), 2)]
269
- return history, query
270
-
271
- def get_answer_at_once(self):
272
- history, query = self._get_glm_style_input()
273
- response, _ = CHATGLM_MODEL.chat(
274
- CHATGLM_TOKENIZER, query, history=history)
275
- return response, len(response)
276
-
277
- def get_answer_stream_iter(self):
278
- history, query = self._get_glm_style_input()
279
- for response, history in CHATGLM_MODEL.stream_chat(
280
- CHATGLM_TOKENIZER,
281
- query,
282
- history,
283
- max_length=self.token_upper_limit,
284
- top_p=self.top_p,
285
- temperature=self.temperature,
286
- ):
287
- yield response
288
-
289
-
290
- class LLaMA_Client(BaseLLMModel):
291
- def __init__(
292
- self,
293
- model_name,
294
- lora_path=None,
295
- ) -> None:
296
- super().__init__(model_name=model_name)
297
- from lmflow.datasets.dataset import Dataset
298
- from lmflow.pipeline.auto_pipeline import AutoPipeline
299
- from lmflow.models.auto_model import AutoModel
300
- from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
301
-
302
- self.max_generation_token = 1000
303
- self.end_string = "\n\n"
304
- # We don't need input data
305
- data_args = DatasetArguments(dataset_path=None)
306
- self.dataset = Dataset(data_args)
307
- self.system_prompt = ""
308
-
309
- global LLAMA_MODEL, LLAMA_INFERENCER
310
- if LLAMA_MODEL is None or LLAMA_INFERENCER is None:
311
- model_path = None
312
- if os.path.exists("models"):
313
- model_dirs = os.listdir("models")
314
- if model_name in model_dirs:
315
- model_path = f"models/{model_name}"
316
- if model_path is not None:
317
- model_source = model_path
318
- else:
319
- model_source = f"decapoda-research/{model_name}"
320
- # raise Exception(f"models目录下没有这个模型: {model_name}")
321
- if lora_path is not None:
322
- lora_path = f"lora/{lora_path}"
323
- model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None,
324
- use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True)
325
- pipeline_args = InferencerArguments(
326
- local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16')
327
-
328
- with open(pipeline_args.deepspeed, "r") as f:
329
- ds_config = json.load(f)
330
- LLAMA_MODEL = AutoModel.get_model(
331
- model_args,
332
- tune_strategy="none",
333
- ds_config=ds_config,
334
- )
335
- LLAMA_INFERENCER = AutoPipeline.get_pipeline(
336
- pipeline_name="inferencer",
337
- model_args=model_args,
338
- data_args=data_args,
339
- pipeline_args=pipeline_args,
340
- )
341
-
342
- def _get_llama_style_input(self):
343
- history = []
344
- instruction = ""
345
- if self.system_prompt:
346
- instruction = (f"Instruction: {self.system_prompt}\n")
347
- for x in self.history:
348
- if x["role"] == "user":
349
- history.append(f"{instruction}Input: {x['content']}")
350
- else:
351
- history.append(f"Output: {x['content']}")
352
- context = "\n\n".join(history)
353
- context += "\n\nOutput: "
354
- return context
355
-
356
- def get_answer_at_once(self):
357
- context = self._get_llama_style_input()
358
-
359
- input_dataset = self.dataset.from_dict(
360
- {"type": "text_only", "instances": [{"text": context}]}
361
- )
362
-
363
- output_dataset = LLAMA_INFERENCER.inference(
364
- model=LLAMA_MODEL,
365
- dataset=input_dataset,
366
- max_new_tokens=self.max_generation_token,
367
- temperature=self.temperature,
368
- )
369
-
370
- response = output_dataset.to_dict()["instances"][0]["text"]
371
- return response, len(response)
372
-
373
- def get_answer_stream_iter(self):
374
- context = self._get_llama_style_input()
375
- partial_text = ""
376
- step = 1
377
- for _ in range(0, self.max_generation_token, step):
378
- input_dataset = self.dataset.from_dict(
379
- {"type": "text_only", "instances": [
380
- {"text": context + partial_text}]}
381
- )
382
- output_dataset = LLAMA_INFERENCER.inference(
383
- model=LLAMA_MODEL,
384
- dataset=input_dataset,
385
- max_new_tokens=step,
386
- temperature=self.temperature,
387
- )
388
- response = output_dataset.to_dict()["instances"][0]["text"]
389
- if response == "" or response == self.end_string:
390
- break
391
- partial_text += response
392
- yield partial_text
393
-
394
-
395
- class XMChat(BaseLLMModel):
396
- def __init__(self, api_key):
397
- super().__init__(model_name="xmchat")
398
- self.api_key = api_key
399
- self.session_id = None
400
- self.reset()
401
- self.image_bytes = None
402
- self.image_path = None
403
- self.xm_history = []
404
- self.url = "https://xmbot.net/web"
405
- self.last_conv_id = None
406
-
407
- def reset(self):
408
- self.session_id = str(uuid.uuid4())
409
- self.last_conv_id = None
410
- return [], "已重置"
411
-
412
- def image_to_base64(self, image_path):
413
- # 打开并加载图片
414
- img = Image.open(image_path)
415
-
416
- # 获取图片的宽度和高度
417
- width, height = img.size
418
-
419
- # 计算压缩比例,以确保最长边小于4096像素
420
- max_dimension = 2048
421
- scale_ratio = min(max_dimension / width, max_dimension / height)
422
-
423
- if scale_ratio < 1:
424
- # 按压缩比例调整图片大小
425
- new_width = int(width * scale_ratio)
426
- new_height = int(height * scale_ratio)
427
- img = img.resize((new_width, new_height), Image.ANTIALIAS)
428
-
429
- # 将图片转换为jpg格式的二进制数据
430
- buffer = BytesIO()
431
- if img.mode == "RGBA":
432
- img = img.convert("RGB")
433
- img.save(buffer, format='JPEG')
434
- binary_image = buffer.getvalue()
435
-
436
- # 对二进制数据进行Base64编码
437
- base64_image = base64.b64encode(binary_image).decode('utf-8')
438
-
439
- return base64_image
440
-
441
- def try_read_image(self, filepath):
442
- def is_image_file(filepath):
443
- # 判断文件是否为图片
444
- valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"]
445
- file_extension = os.path.splitext(filepath)[1].lower()
446
- return file_extension in valid_image_extensions
447
-
448
- if is_image_file(filepath):
449
- logging.info(f"读取图片文件: {filepath}")
450
- self.image_bytes = self.image_to_base64(filepath)
451
- self.image_path = filepath
452
- else:
453
- self.image_bytes = None
454
- self.image_path = None
455
-
456
- def like(self):
457
- if self.last_conv_id is None:
458
- return "点赞失败,你还没发送过消息"
459
- data = {
460
- "uuid": self.last_conv_id,
461
- "appraise": "good"
462
- }
463
- response = requests.post(self.url, json=data)
464
- return "👍点赞成功,,感谢反馈~"
465
-
466
- def dislike(self):
467
- if self.last_conv_id is None:
468
- return "点踩失败,你还没发送过消息"
469
- data = {
470
- "uuid": self.last_conv_id,
471
- "appraise": "bad"
472
- }
473
- response = requests.post(self.url, json=data)
474
- return "👎点踩成功,感谢反馈~"
475
-
476
- def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
477
- fake_inputs = real_inputs
478
- display_append = ""
479
- limited_context = False
480
- return limited_context, fake_inputs, display_append, real_inputs, chatbot
481
-
482
- def handle_file_upload(self, files, chatbot):
483
- """if the model accepts multi modal input, implement this function"""
484
- if files:
485
- for file in files:
486
- if file.name:
487
- logging.info(f"尝试读取图像: {file.name}")
488
- self.try_read_image(file.name)
489
- if self.image_path is not None:
490
- chatbot = chatbot + [((self.image_path,), None)]
491
- if self.image_bytes is not None:
492
- logging.info("使用图片作为输入")
493
- # XMChat的一轮对话中实际上只能处理一张图片
494
- self.reset()
495
- conv_id = str(uuid.uuid4())
496
- data = {
497
- "user_id": self.api_key,
498
- "session_id": self.session_id,
499
- "uuid": conv_id,
500
- "data_type": "imgbase64",
501
- "data": self.image_bytes
502
- }
503
- response = requests.post(self.url, json=data)
504
- response = json.loads(response.text)
505
- logging.info(f"图片回复: {response['data']}")
506
- return None, chatbot, None
507
-
508
- def get_answer_at_once(self):
509
- question = self.history[-1]["content"]
510
- conv_id = str(uuid.uuid4())
511
- self.last_conv_id = conv_id
512
- data = {
513
- "user_id": self.api_key,
514
- "session_id": self.session_id,
515
- "uuid": conv_id,
516
- "data_type": "text",
517
- "data": question
518
- }
519
- response = requests.post(self.url, json=data)
520
- try:
521
- response = json.loads(response.text)
522
- return response["data"], len(response["data"])
523
- except Exception as e:
524
- return response.text, len(response.text)
525
-
526
-
527
-
528
-
529
- def get_model(
530
- model_name,
531
- lora_model_path=None,
532
- access_key=None,
533
- temperature=None,
534
- top_p=None,
535
- system_prompt=None,
536
- ) -> BaseLLMModel:
537
- msg = i18n("模型设置为了:") + f" {model_name}"
538
- model_type = ModelType.get_type(model_name)
539
- lora_selector_visibility = False
540
- lora_choices = []
541
- dont_change_lora_selector = False
542
- if model_type != ModelType.OpenAI:
543
- config.local_embedding = True
544
- # del current_model.model
545
- model = None
546
- try:
547
- if model_type == ModelType.OpenAI:
548
- logging.info(f"正在加载OpenAI模型: {model_name}")
549
- model = OpenAIClient(
550
- model_name=model_name,
551
- api_key=access_key,
552
- system_prompt=system_prompt,
553
- temperature=temperature,
554
- top_p=top_p,
555
- )
556
- elif model_type == ModelType.ChatGLM:
557
- logging.info(f"正在加载ChatGLM模型: {model_name}")
558
- model = ChatGLM_Client(model_name)
559
- elif model_type == ModelType.LLaMA and lora_model_path == "":
560
- msg = f"现在请为 {model_name} 选择LoRA模型"
561
- logging.info(msg)
562
- lora_selector_visibility = True
563
- if os.path.isdir("lora"):
564
- lora_choices = get_file_names(
565
- "lora", plain=True, filetypes=[""])
566
- lora_choices = ["No LoRA"] + lora_choices
567
- elif model_type == ModelType.LLaMA and lora_model_path != "":
568
- logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}")
569
- dont_change_lora_selector = True
570
- if lora_model_path == "No LoRA":
571
- lora_model_path = None
572
- msg += " + No LoRA"
573
- else:
574
- msg += f" + {lora_model_path}"
575
- model = LLaMA_Client(model_name, lora_model_path)
576
- elif model_type == ModelType.XMChat:
577
- if os.environ.get("XMCHAT_API_KEY") != "":
578
- access_key = os.environ.get("XMCHAT_API_KEY")
579
- model = XMChat(api_key=access_key)
580
- elif model_type == ModelType.Unknown:
581
- raise ValueError(f"未知模型: {model_name}")
582
- logging.info(msg)
583
- except Exception as e:
584
- logging.error(e)
585
- msg = f"{STANDARD_ERROR_MSG}: {e}"
586
- if dont_change_lora_selector:
587
- return model, msg
588
- else:
589
- return model, msg, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility)
590
-
591
-
592
- if __name__ == "__main__":
593
- with open("config.json", "r") as f:
594
- openai_api_key = cjson.load(f)["openai_api_key"]
595
- # set logging level to debug
596
- logging.basicConfig(level=logging.DEBUG)
597
- # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key)
598
- client = get_model(model_name="chatglm-6b-int4")
599
- chatbot = []
600
- stream = False
601
- # 测试账单功能
602
- logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET)
603
- logging.info(client.billing_info())
604
- # 测试问答
605
- logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET)
606
- question = "巴黎是中国的首都吗?"
607
- for i in client.predict(inputs=question, chatbot=chatbot, stream=stream):
608
- logging.info(i)
609
- logging.info(f"测试问答后history : {client.history}")
610
- # 测试记忆力
611
- logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET)
612
- question = "我刚刚问了你什么问题?"
613
- for i in client.predict(inputs=question, chatbot=chatbot, stream=stream):
614
- logging.info(i)
615
- logging.info(f"测试记忆力后history : {client.history}")
616
- # 测试重试功能
617
- logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET)
618
- for i in client.retry(chatbot=chatbot, stream=stream):
619
- logging.info(i)
620
- logging.info(f"重试后history : {client.history}")
621
- # # 测试总结功能
622
- # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET)
623
- # chatbot, msg = client.reduce_token_size(chatbot=chatbot)
624
- # print(chatbot, msg)
625
- # print(f"总结后history: {client.history}")