Spaces (Paused)

Alexandra Kueck committed
Commit • 9e1636e
Duplicate from alexkueck/ChatBotLI2
Files changed:
- .gitattributes +34 -0
- README.md +13 -0
- app.py +190 -0
- cookies.json +132 -0
- presets.py +83 -0
- requirements.txt +7 -0
- utils.py +147 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: ChatBotLI2
+emoji: 🔥
+colorFrom: indigo
+colorTo: indigo
+sdk: gradio
+sdk_version: 3.29.0
+app_file: app.py
+pinned: false
+duplicated_from: alexkueck/ChatBotLI2
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import gradio as gr
+#from transformers import pipeline
+import torch
+from utils import *
+from presets import *
+
+#antwort=""
+
+# Create a chatbot connection
+#chatbot = hugchat.ChatBot(cookie_path="cookies.json")
+
+# Alternatively, with arbitrary models:
+base_model = "project-baize/baize-v2-7b"
+adapter_model = None
+tokenizer, model, device = load_tokenizer_and_model(base_model, adapter_model)
+
+# Counter for requests handled so far (used below in predict)
+total_count = 0
+
+# Start a new conversation (ignore error)
+#id = chatbot.new_conversation()
+#chatbot.change_conversation(id)
+
+
+def predict(text, chatbotGr, history, top_p, temperature, max_length_tokens, max_context_length_tokens):
+    #global antwort
+    inputs = generate_prompt_with_history(text, history, tokenizer, max_length=max_context_length_tokens)
+
+    if inputs is None:
+        yield chatbotGr, history, "Input too long."
+        return
+    else:
+        prompt, inputs = inputs
+        begin_length = len(prompt)
+    input_ids = inputs["input_ids"][:, -max_context_length_tokens:].to(device)
+    torch.cuda.empty_cache()
+    global total_count
+    total_count += 1
+    print(total_count)
+    if total_count % 50 == 0:
+        os.system("nvidia-smi")
+    with torch.no_grad():
+        for x in greedy_search(input_ids, model, tokenizer, stop_words=["[|Human|]", "[|AI|]"], max_length=max_length_tokens, temperature=temperature, top_p=top_p):
+            if is_stop_word_or_prefix(x, ["[|Human|]", "[|AI|]"]) is False:
+                if "[|Human|]" in x:
+                    x = x[:x.index("[|Human|]")].strip()
+                if "[|AI|]" in x:
+                    x = x[:x.index("[|AI|]")].strip()
+                x = x.strip()
+                a, b = [[y[0], convert_to_markdown(y[1])] for y in history] + [[text, convert_to_markdown(x)]], history + [[text, x]]
+                yield a, b, "Generating..."
+            if shared_state.interrupted:
+                shared_state.recover()
+                try:
+                    yield a, b, "Stop: Success"
+                    return
+                except:
+                    pass
+    del input_ids
+    gc.collect()
+    torch.cuda.empty_cache()
+    #print(text)
+    #print(x)
+    #print("="*80)
+    try:
+        yield a, b, "Generate: Success"
+    except:
+        pass
+
+"""
+    if inputs is None:
+        #antwort=""
+        yield chatbotGr, history, "Input too long."
+        return
+    else:
+        prompt, inputs = inputs
+        #begin_length = len(prompt)
+
+        antwort = chatbot.chat(prompt)
+"""
+
+def reset_chat():
+    # The hugchat connection is commented out above, so only the textbox
+    # is reset here; these two calls would reset the hugchat conversation:
+    #id_new = chatbot.new_conversation()
+    #chatbot.change_conversation(id_new)
+    return reset_textbox()
+
+
+with gr.Blocks(theme=small_and_beautiful_theme) as demo:
+    history = gr.State([])
+    user_question = gr.State("")
+    with gr.Row():
+        gr.HTML(title)
+        status_display = gr.Markdown("Success", elem_id="status_display")
+    gr.Markdown(description_top)
+    with gr.Row(scale=1).style(equal_height=True):
+        with gr.Column(scale=5):
+            with gr.Row(scale=1):
+                chatbotGr = gr.Chatbot(elem_id="LI_chatbot").style(height="100%")
+            with gr.Row(scale=1):
+                with gr.Column(scale=12):
+                    user_input = gr.Textbox(
+                        show_label=False, placeholder="Enter your text / question."
+                    ).style(container=False)
+                with gr.Column(min_width=90, scale=1):
+                    submitBtn = gr.Button("Send")
+                with gr.Column(min_width=90, scale=1):
+                    # Note: cancelBtn is created but not wired to cancel_outputing in this commit.
+                    cancelBtn = gr.Button("Stop")
+            with gr.Row(scale=1):
+                emptyBtn = gr.Button(
+                    "🧹 New Chat",
+                )
+        with gr.Column():
+            with gr.Column(min_width=50, scale=1):
+                with gr.Tab(label="Model parameters"):
+                    gr.Markdown("# Parameters")
+                    top_p = gr.Slider(
+                        minimum=0,
+                        maximum=1.0,
+                        value=0.95,
+                        step=0.05,
+                        interactive=True,
+                        label="Top-p",
+                    )
+                    temperature = gr.Slider(
+                        minimum=0.1,
+                        maximum=2.0,
+                        value=1,
+                        step=0.1,
+                        interactive=True,
+                        label="Temperature",
+                    )
+                    max_length_tokens = gr.Slider(
+                        minimum=0,
+                        maximum=512,
+                        value=512,
+                        step=8,
+                        interactive=True,
+                        label="Max Generation Tokens",
+                    )
+                    max_context_length_tokens = gr.Slider(
+                        minimum=0,
+                        maximum=4096,
+                        value=2048,
+                        step=128,
+                        interactive=True,
+                        label="Max History Tokens",
+                    )
+    gr.Markdown(description)
+
+    predict_args = dict(
+        fn=predict,
+        inputs=[
+            user_question,
+            chatbotGr,
+            history,
+            top_p,
+            temperature,
+            max_length_tokens,
+            max_context_length_tokens,
+        ],
+        outputs=[chatbotGr, history, status_display],
+        show_progress=True,
+    )
+
+    # new chat
+    reset_args = dict(
+        fn=reset_chat, inputs=[], outputs=[user_input, status_display]
+    )
+
+    # Chatbot
+    transfer_input_args = dict(
+        fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn], show_progress=True
+    )
+
+    # Listeners for a click on the start button or pressing Return
+    predict_event1 = user_input.submit(**transfer_input_args).then(**predict_args)
+    predict_event2 = submitBtn.click(**transfer_input_args).then(**predict_args)
+
+    # Listener for reset...
+    emptyBtn.click(
+        reset_state,
+        outputs=[chatbotGr, history, status_display],
+        show_progress=True,
+    )
+    emptyBtn.click(**reset_args)
+
+demo.title = "LI Chat"
+#demo.queue(concurrency_count=1).launch(share=True)
+demo.queue(concurrency_count=1).launch()
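Note that app.py calls is_stop_word_or_prefix, convert_to_markdown, and shared_state (via `from utils import *`), but the utils.py added in this commit does not define them, so generation would fail with a NameError. Below is a minimal sketch of what these helpers could look like, modeled on the Baize demo this code derives from; the exact behavior, especially convert_to_markdown (reduced here to plain HTML escaping), is an assumption, not part of this commit.

# Hypothetical additions to utils.py -- NOT part of this commit.
import html

class State:
    interrupted = False

    def interrupt(self):
        self.interrupted = True

    def recover(self):
        self.interrupted = False

shared_state = State()

def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
    # True if s ends with a stop word or with a prefix of one, i.e. the
    # stream may be about to emit a stop word and should not be shown yet.
    for stop_word in stop_words:
        if s.endswith(stop_word):
            return True
        for i in range(1, len(stop_word)):
            if s.endswith(stop_word[:i]):
                return True
    return False

def convert_to_markdown(text: str) -> str:
    # Simplified stand-in: the original Baize demo also syntax-highlights
    # code blocks before rendering.
    return html.escape(text)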
cookies.json
ADDED
@@ -0,0 +1,132 @@
+[
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1684947627,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "intercom-session-hgve3glw",
+        "path": "/",
+        "sameSite": "lax",
+        "secure": false,
+        "session": false,
+        "storeId": null,
+        "value": ""
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1716107825,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "__stripe_mid",
+        "path": "/",
+        "sameSite": "strict",
+        "secure": true,
+        "session": false,
+        "storeId": null,
+        "value": "ee917974-9f17-465f-bd5f-98c3ef273dcebfc299"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1719131825.057236,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "_ga",
+        "path": "/",
+        "sameSite": null,
+        "secure": false,
+        "session": false,
+        "storeId": null,
+        "value": "GA1.1.1854576425.1683459014"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1715265897.480276,
+        "hostOnly": false,
+        "httpOnly": true,
+        "name": "token",
+        "path": "/",
+        "sameSite": "lax",
+        "secure": true,
+        "session": false,
+        "storeId": null,
+        "value": "XPKKEnVzdMeoBdDLQWVipFyNlQAEAHWQohUpGAfcoHwPXNZogPpxHYWbdDcGRdiSrZcOCHFsKuPvVIQwMsybldJAmgkzemIAcjPHwizDfPeitRXgmSlfPpDGFBvFHVsM"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1707672827,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "intercom-device-id-hgve3glw",
+        "path": "/",
+        "sameSite": "lax",
+        "secure": false,
+        "session": false,
+        "storeId": null,
+        "value": "7ff02e75-b8a1-43e8-8af3-2520b153983e"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1684573625,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "__stripe_sid",
+        "path": "/",
+        "sameSite": "strict",
+        "secure": true,
+        "session": false,
+        "storeId": null,
+        "value": "e18d3fcd-4185-49bc-b24e-463d3eb18f443c0c01"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1719131825.056149,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "_ga_8Q63TH4CSL",
+        "path": "/",
+        "sameSite": null,
+        "secure": false,
+        "session": false,
+        "storeId": null,
+        "value": "GS1.1.1684571540.41.1.1684571825.0.0.0"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1684658225,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "_gid",
+        "path": "/",
+        "sameSite": null,
+        "secure": false,
+        "session": false,
+        "storeId": null,
+        "value": "GA1.2.1709365894.1683962834"
+    },
+    {
+        "domain": "huggingface.co",
+        "expirationDate": 1716194076.862726,
+        "hostOnly": true,
+        "httpOnly": true,
+        "name": "hf-chat",
+        "path": "/",
+        "sameSite": "no_restriction",
+        "secure": true,
+        "session": false,
+        "storeId": null,
+        "value": "65609f48-0d3b-4b69-931b-cd572b1fc88d"
+    },
+    {
+        "domain": ".huggingface.co",
+        "expirationDate": 1707672827,
+        "hostOnly": false,
+        "httpOnly": false,
+        "name": "intercom-id-hgve3glw",
+        "path": "/",
+        "sameSite": "lax",
+        "secure": false,
+        "session": false,
+        "storeId": null,
+        "value": "80725bd4-464f-425b-b9c9-313cf5b23012"
+    }
+]
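cookies.json holds exported browser cookies, including a live `token` auth cookie that normally should not be committed, for the hugchat path that is commented out in app.py. Based solely on the calls that appear (commented out) in app.py, that path would be wired up roughly as in this sketch; the import line and hugchat's actual API surface are assumptions.

# Sketch of the disabled hugchat path, using only the calls that appear
# commented out in app.py; hugchat's real API may differ.
from hugchat import hugchat  # assumed import

# Authenticate with the exported browser cookies committed in this repo.
chatbot = hugchat.ChatBot(cookie_path="cookies.json")
id = chatbot.new_conversation()
chatbot.change_conversation(id)
antwort = chatbot.chat("Hello!")  # "antwort" is German for "answer"
print(antwort)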
presets.py
ADDED
@@ -0,0 +1,83 @@
+# -*- coding:utf-8 -*-
+import gradio as gr
+
+
+title = """<h1 align="left" style="min-width:200px; margin-top:0;"> Hugging Chat </h1>"""
+description_top = """\
+<div align="left">
+<p> Underlying AI model: HuggingChat with ...</p>
+<p>
+Disclaimer: The AI model used here comes from the Hugging Face model hub and is an open-source model. This demo may not be used for commercial purposes! The model's output is not censored, and the authors of the AI do not necessarily agree with its content. Use at your own risk.
+</p>
+</div>
+"""
+description = """\
+<div align="center" style="margin:16px 0">
+This demo builds on the open-source HuggingChat model.
+</div>
+"""
+CONCURRENT_COUNT = 100
+
+
+ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
+
+small_and_beautiful_theme = gr.themes.Soft(
+    primary_hue=gr.themes.Color(
+        c50="#02C160",
+        c100="rgba(2, 193, 96, 0.2)",
+        c200="#02C160",
+        c300="rgba(2, 193, 96, 0.32)",
+        c400="rgba(2, 193, 96, 0.32)",
+        c500="rgba(2, 193, 96, 1.0)",
+        c600="rgba(2, 193, 96, 1.0)",
+        c700="rgba(2, 193, 96, 0.32)",
+        c800="rgba(2, 193, 96, 0.32)",
+        c900="#02C160",
+        c950="#02C160",
+    ),
+    secondary_hue=gr.themes.Color(
+        c50="#576b95",
+        c100="#576b95",
+        c200="#576b95",
+        c300="#576b95",
+        c400="#576b95",
+        c500="#576b95",
+        c600="#576b95",
+        c700="#576b95",
+        c800="#576b95",
+        c900="#576b95",
+        c950="#576b95",
+    ),
+    neutral_hue=gr.themes.Color(
+        name="gray",
+        c50="#f9fafb",
+        c100="#f3f4f6",
+        c200="#e5e7eb",
+        c300="#d1d5db",
+        c400="#B2B2B2",
+        c500="#808080",
+        c600="#636363",
+        c700="#515151",
+        c800="#393939",
+        c900="#272727",
+        c950="#171717",
+    ),
+    radius_size=gr.themes.sizes.radius_sm,
+).set(
+    button_primary_background_fill="#06AE56",
+    button_primary_background_fill_dark="#06AE56",
+    button_primary_background_fill_hover="#07C863",
+    button_primary_border_color="#06AE56",
+    button_primary_border_color_dark="#06AE56",
+    button_primary_text_color="#FFFFFF",
+    button_primary_text_color_dark="#FFFFFF",
+    button_secondary_background_fill="#F2F2F2",
+    button_secondary_background_fill_dark="#2B2B2B",
+    button_secondary_text_color="#393939",
+    button_secondary_text_color_dark="#FFFFFF",
+    # background_fill_primary="#F7F7F7",
+    # background_fill_primary_dark="#1F1F1F",
+    block_title_text_color="*primary_500",
+    block_title_background_fill="*primary_100",
+    input_background_fill="#F6F6F6",
+)
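For reference, the theme object defined above is consumed in app.py via gr.Blocks(theme=small_and_beautiful_theme). A minimal standalone usage, assuming gradio 3.29 as pinned in the README, might look like this:

# Minimal sketch: applying the theme from presets.py to a throwaway demo.
import gradio as gr
from presets import small_and_beautiful_theme

with gr.Blocks(theme=small_and_beautiful_theme) as demo:
    gr.Markdown("Theme preview")
    gr.Button("Button")  # picks up the theme's button colors and radius

demo.launch()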
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+gradio
+torch
+sentencepiece
+transformers==4.28.1
+
+
+
utils.py
ADDED
@@ -0,0 +1,147 @@
+from __future__ import annotations
+from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Tuple, Type
+import logging
+import json
+import os
+import datetime
+import hashlib
+import csv
+import requests
+import re
+import html
+import torch
+import sys
+import gc
+import gradio as gr
+from pygments import highlight
+from pygments.lexers import guess_lexer, get_lexer_by_name, ClassNotFound
+from pygments.formatters import HtmlFormatter
+import transformers
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+
+def reset_state():
+    return [], [], "Reset Done"
+
+def reset_textbox():
+    return gr.update(value=""), ""
+
+def cancel_outputing():
+    return "Stop Done"
+
+def transfer_input(inputs):
+    textbox = reset_textbox()
+    return (
+        inputs,
+        gr.update(value=""),
+        gr.Button.update(visible=True),
+    )
+
+def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
+    prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
+    history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0], x[1]) for x in history]
+    history.append("\n[|Human|]{}\n[|AI|]".format(text))
+    history_text = ""
+    flag = False
+    for x in history[::-1]:
+        if tokenizer(prompt + history_text + x, return_tensors="pt")['input_ids'].size(-1) <= max_length:
+            history_text = x + history_text
+            flag = True
+        else:
+            break
+    if flag:
+        return prompt + history_text, tokenizer(prompt + history_text, return_tensors="pt")
+    else:
+        return None
+
+
+# Note: these module-level loads run at import time and duplicate the work
+# done by load_tokenizer_and_model() below (which app.py also calls).
+tokenizer = AutoTokenizer.from_pretrained("project-baize/baize-v2-7b")
+model = AutoModelForCausalLM.from_pretrained("project-baize/baize-v2-7b")
+
+def load_tokenizer_and_model(base_model, load_8bit=False):
+    if torch.cuda.is_available():
+        device = "cuda"
+    else:
+        device = "cpu"
+
+    tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast=False)
+    if device == "cuda":
+        model = AutoModelForCausalLM.from_pretrained(
+            base_model,
+            load_in_8bit=load_8bit,
+            torch_dtype=torch.float16,
+            device_map="auto",
+        )
+    else:
+        model = AutoModelForCausalLM.from_pretrained(
+            base_model, device_map={"": device}, low_cpu_mem_usage=True
+        )
+
+    if not load_8bit:
+        model.half()  # seems to fix bugs for some users.
+
+    model.eval()
+    return tokenizer, model, device
+
+# Greedy Search
+def greedy_search(input_ids: torch.Tensor,
+                  model: torch.nn.Module,
+                  tokenizer: transformers.PreTrainedTokenizer,
+                  stop_words: list,
+                  max_length: int,
+                  temperature: float = 1.0,
+                  top_p: float = 1.0,
+                  top_k: int = 25) -> Iterator[str]:
+    generated_tokens = []
+    past_key_values = None
+    current_length = 1
+    for i in range(max_length):
+        with torch.no_grad():
+            if past_key_values is None:
+                outputs = model(input_ids)
+            else:
+                outputs = model(input_ids[:, -1:], past_key_values=past_key_values)
+            logits = outputs.logits[:, -1, :]
+            past_key_values = outputs.past_key_values
+
+        # apply temperature
+        logits /= temperature
+
+        probs = torch.softmax(logits, dim=-1)
+        # apply top_p
+        probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
+        probs_sum = torch.cumsum(probs_sort, dim=-1)
+        mask = probs_sum - probs_sort > top_p
+        probs_sort[mask] = 0.0
+
+        # apply top_k
+        #if top_k is not None:
+        #    probs_sort1, _ = torch.topk(probs_sort, top_k)
+        #    min_top_probs_sort = torch.min(probs_sort1, dim=-1, keepdim=True).values
+        #    probs_sort = torch.where(probs_sort < min_top_probs_sort, torch.full_like(probs_sort, float(0.0)), probs_sort)
+
+        probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
+        next_token = torch.multinomial(probs_sort, num_samples=1)
+        next_token = torch.gather(probs_idx, -1, next_token)
+
+        input_ids = torch.cat((input_ids, next_token), dim=-1)
+
+        generated_tokens.append(next_token[0].item())
+        text = tokenizer.decode(generated_tokens)
+
+        yield text
+        if any([x in text for x in stop_words]):
+            del past_key_values
+            del logits
+            del probs
+            del probs_sort
+            del probs_idx
+            del probs_sum
+            gc.collect()
+            return
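Despite its name, greedy_search above performs temperature plus top-p (nucleus) sampling and yields the full decoded text after every generated token. A minimal driver built only from the functions in this file would stream a reply as in the sketch below; it assumes the 7B checkpoint fits in available RAM/VRAM.

# Minimal sketch of driving greedy_search directly.
from utils import load_tokenizer_and_model, generate_prompt_with_history, greedy_search

tokenizer, model, device = load_tokenizer_and_model("project-baize/baize-v2-7b")

# Build the Baize-style prompt (returns None if it exceeds max_length).
prompt, inputs = generate_prompt_with_history("Hello!", [], tokenizer, max_length=2048)
input_ids = inputs["input_ids"].to(device)

# Stop words mirror the [|Human|]/[|AI|] turn protocol used in app.py.
for partial in greedy_search(input_ids, model, tokenizer,
                             stop_words=["[|Human|]", "[|AI|]"],
                             max_length=512, temperature=1.0, top_p=0.95):
    pass  # each `partial` is the whole decoded reply so far
print(partial)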