# -*- coding:utf-8 -*-
import os
from pathlib import Path
import gradio as gr
from .webui_locale import I18nAuto
i18n = I18nAuto() # internationalization
CHATGLM_MODEL = None
CHATGLM_TOKENIZER = None
LLAMA_MODEL = None
LLAMA_INFERENCER = None
# ChatGPT settings
INITIAL_SYSTEM_PROMPT = "You are a helpful assistant."
API_HOST = "api.openai.com"
COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
BALANCE_API_URL = "https://api.openai.com/dashboard/billing/credit_grants"
USAGE_API_URL = "https://api.openai.com/dashboard/billing/usage"
HISTORY_DIR = "history"
TEMPLATES_DIR = "templates"
# Error messages
STANDARD_ERROR_MSG = i18n("☹️發生了錯誤:") # standard prefix for error messages
GENERAL_ERROR_MSG = i18n("獲取對話時發生錯誤,請查看後臺日誌")
ERROR_RETRIEVE_MSG = i18n("請檢查網路連接,或者API-Key是否有效。")
CONNECTION_TIMEOUT_MSG = i18n("連接逾時,無法獲取對話。") # connection timeout
READ_TIMEOUT_MSG = i18n("讀取超時,無法獲取對話。") # read timeout
PROXY_ERROR_MSG = i18n("代理錯誤,無法獲取對話。") # proxy error
SSL_ERROR_PROMPT = i18n("SSL錯誤,無法獲取對話。") # SSL error
NO_APIKEY_MSG = i18n("API key為空,請檢查是否輸入正確。") # shown when the API key is shorter than 51 characters
NO_INPUT_MSG = i18n("請輸入對話內容。") # no conversation input provided
BILLING_NOT_APPLICABLE_MSG = i18n("帳單資訊不適用") # billing info returned by locally hosted models
TIMEOUT_STREAMING = 60 # timeout (seconds) for streaming responses
TIMEOUT_ALL = 200 # timeout (seconds) for non-streaming responses
ENABLE_STREAMING_OPTION = True # whether to show the checkbox for toggling real-time (streaming) display of answers
HIDE_MY_KEY = False # set to True to hide your API key in the UI
CONCURRENT_COUNT = 100 # maximum number of concurrent users
SIM_K = 5
INDEX_QUERY_TEMPRATURE = 1.0
CHUANHU_TITLE = i18n("🚀 中華大學 孔維珺 B10902211 🚀")
CHUANHU_DESCRIPTION = i18n("中華大學 B10902211 孔維珺")
ONLINE_MODELS = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "川虎助理",
    "川虎助理 Pro",
    "GooglePaLM",
    "xmchat",
    "Azure OpenAI",
    "yuanai-1.0-base_10B",
    "yuanai-1.0-translate",
    "yuanai-1.0-dialog",
    "yuanai-1.0-rhythm_poems",
    "minimax-abab4-chat",
    "minimax-abab5-chat",
    "midjourney",
]
LOCAL_MODELS = [
    "chatglm-6b",
    "chatglm-6b-int4",
    "chatglm-6b-int4-qe",
    "chatglm2-6b",
    "chatglm2-6b-int4",
    "StableLM",
    "MOSS",
    "llama-7b-hf",
    "llama-13b-hf",
    "llama-30b-hf",
    "llama-65b-hf",
]
if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
    MODELS = ONLINE_MODELS
else:
    MODELS = ONLINE_MODELS + LOCAL_MODELS
DEFAULT_MODEL = 0 # index into MODELS; MODELS[0] is "gpt-3.5-turbo"
os.makedirs("models", exist_ok=True)
os.makedirs("lora", exist_ok=True)
os.makedirs("history", exist_ok=True)
# Register any model directories found under models/ that are not already listed
for dir_name in os.listdir("models"):
    if os.path.isdir(os.path.join("models", dir_name)):
        if dir_name not in MODELS:
            MODELS.append(dir_name)
MODEL_TOKEN_LIMIT = {
    "gpt-3.5-turbo": 4096,
    "gpt-3.5-turbo-16k": 16384,
    "gpt-3.5-turbo-0301": 4096,
    "gpt-3.5-turbo-0613": 4096,
    "gpt-4": 8192,
    "gpt-4-0314": 8192,
    "gpt-4-0613": 8192,
    "gpt-4-32k": 32768,
    "gpt-4-32k-0314": 32768,
    "gpt-4-32k-0613": 32768,
}
TOKEN_OFFSET = 1000 # subtracted from the model's token limit to get the soft limit; once the soft limit is reached, token usage is automatically reduced
DEFAULT_TOKEN_LIMIT = 3000 # default token limit
REDUCE_TOKEN_FACTOR = 0.5 # multiplied by the model's token limit to get the target token count; when reducing token usage, shrink it below this target
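# Worked example (illustration only, not executed): for "gpt-3.5-turbo",
#   hard limit = MODEL_TOKEN_LIMIT["gpt-3.5-turbo"]   -> 4096
#   soft limit = 4096 - TOKEN_OFFSET                   -> 3096
#   target     = int(4096 * REDUCE_TOKEN_FACTOR)       -> 2048
# Once token usage passes the soft limit, the chat handler is expected to trim
# the conversation until usage falls below the target.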
REPLY_LANGUAGES = [
    "簡體中文",
    "繁體中文",
    "English",
    "日本語",
    "Español",
    "Français",
    "Deutsch",
    "한국어",
    "跟隨問題語言(不穩定)",
]
WEBSEARCH_PTOMPT_TEMPLATE = """\
Web search results:
{web_results}
Current date: {current_date}
Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
Query: {query}
Reply in {reply_language}
"""
PROMPT_TEMPLATE = """\
Context information is below.
---------------------
{context_str}
---------------------
Current date: {current_date}.
Using the provided context information, write a comprehensive reply to the given query.
Make sure to cite results using [number] notation after the reference.
If the provided context information refers to multiple subjects with the same name, write separate answers for each subject.
Use prior knowledge only if the given context does not provide enough information.
Answer the question: {query_str}
Reply in {reply_language}
"""
REFINE_TEMPLATE = """\
The original question is as follows: {query_str}
We have provided an existing answer: {existing_answer}
We have the opportunity to refine the existing answer
(only if needed) with some more context below.
------------
{context_msg}
------------
Given the new context, refine the original answer to better answer the question.
Reply in {reply_language}
If the context isn't useful, return the original answer.
"""
SUMMARIZE_PROMPT = """Write a concise summary of the following:
{text}
CONCISE SUMMARY IN 中文:"""
ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
START_OF_OUTPUT_MARK = "<!-- SOO IN MESSAGE -->"
END_OF_OUTPUT_MARK = "<!-- EOO IN MESSAGE -->"
small_and_beautiful_theme = gr.themes.Soft(
    primary_hue=gr.themes.Color(
        c50="#EBFAF2",
        c100="#CFF3E1",
        c200="#A8EAC8",
        c300="#77DEA9",
        c400="#3FD086",
        c500="#02C160",
        c600="#06AE56",
        c700="#05974E",
        c800="#057F45",
        c900="#04673D",
        c950="#2E5541",
        name="small_and_beautiful",
    ),
    secondary_hue=gr.themes.Color(
        c50="#576b95",
        c100="#576b95",
        c200="#576b95",
        c300="#576b95",
        c400="#576b95",
        c500="#576b95",
        c600="#576b95",
        c700="#576b95",
        c800="#576b95",
        c900="#576b95",
        c950="#576b95",
    ),
    neutral_hue=gr.themes.Color(
        name="gray",
        c50="#f6f7f8",
        # c100="#f3f4f6",
        c100="#F2F2F2",
        c200="#e5e7eb",
        c300="#d1d5db",
        c400="#B2B2B2",
        c500="#808080",
        c600="#636363",
        c700="#515151",
        c800="#393939",
        # c900="#272727",
        c900="#2B2B2B",
        c950="#171717",
    ),
    radius_size=gr.themes.sizes.radius_sm,
).set(
    # button_primary_background_fill="*primary_500",
    button_primary_background_fill_dark="*primary_600",
    # button_primary_background_fill_hover="*primary_400",
    # button_primary_border_color="*primary_500",
    button_primary_border_color_dark="*primary_600",
    button_primary_text_color="white",
    button_primary_text_color_dark="white",
    button_secondary_background_fill="*neutral_100",
    button_secondary_background_fill_hover="*neutral_50",
    button_secondary_background_fill_dark="*neutral_900",
    button_secondary_text_color="*neutral_800",
    button_secondary_text_color_dark="white",
    # background_fill_primary="#F7F7F7",
    # background_fill_primary_dark="#1F1F1F",
    # block_title_text_color="*primary_500",
    block_title_background_fill_dark="*primary_900",
    block_label_background_fill_dark="*primary_900",
    input_background_fill="#F6F6F6",
    chatbot_code_background_color="*neutral_950",
    chatbot_code_background_color_dark="*neutral_950",
)