AFischer1985 committed: initial commit
- requirements.txt +3 -0
- run.py +345 -0
requirements.txt ADDED
@@ -0,0 +1,3 @@
llama-cpp-python[server]
chromadb
sentence_transformers
run.py ADDED
@@ -0,0 +1,345 @@
#########################################################################################
# Title: Gradio Writing Assistant
# Author: Andreas Fischer
# Date: May 23rd, 2024
# Last update: May 23rd, 2024
#########################################################################################

# https://github.com/abetlen/llama-cpp-python/issues/306
# sudo apt install libclblast-dev
# CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 pip install llama-cpp-python --force-reinstall --upgrade --no-cache-dir -v

# Prepare resources
#-------------------
import torch
import gc
torch.cuda.empty_cache()
gc.collect()

# Chroma-DB
#-----------
import os
import chromadb
dbPath = "/home/af/Schreibtisch/Code/gradio/Chroma/db"
onPrem = True if(os.path.exists(dbPath)) else False
if(onPrem==False): dbPath="/home/user/app/db"

#onPrem=True # uncomment to override automatic detection
print(dbPath)
#client = chromadb.Client()
path=dbPath
client = chromadb.PersistentClient(path=path)
print(client.heartbeat())
print(client.get_version())
print(client.list_collections())
from chromadb.utils import embedding_functions
default_ef = embedding_functions.DefaultEmbeddingFunction()
#sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="T-Systems-onsite/cross-en-de-roberta-sentence-transformer")
#instructor_ef = embedding_functions.InstructorEmbeddingFunction(model_name="hkunlp/instructor-large", device="cuda")
embeddingModel = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="T-Systems-onsite/cross-en-de-roberta-sentence-transformer", device="cuda" if(onPrem) else "cpu")
print(str(client.list_collections()))

global collection
dbName="writingStyleDB1"

if("name="+dbName in str(client.list_collections())): client.delete_collection(name=dbName)  # drop any existing collection so it is rebuilt from scratch

if("name="+dbName in str(client.list_collections())):
    print(dbName+" found!")
    collection = client.get_collection(name=dbName, embedding_function=embeddingModel)
else:
    print(dbName+" created!")
    collection = client.create_collection(
        dbName,
        embedding_function=embeddingModel,
        metadata={"hnsw:space": "cosine"})

print("Database ready!")
print(collection.count())

x=collection.get(include=[])["ids"]
if(len(x)==0):
    collection.add(
        documents=["Ich möchte einen Blogbeitrag","Ich möchte einen Gliederungsvorschlag","Ich möchte einen Social Media Beitrag"],
        metadatas=[
            {"prompt": "Bitte schreibe einen Blogbeitrag zur Anfrage des Users!"},
            {"prompt": "Bitte entwerfe einen Gliederungsvorschlag zur Anfrage des Users!"},
            {"prompt": "Bitte verfasse einen Beitrag für die professionelle social media Plattform LinkedIn zur Anfrage des Users!"}],
        ids=[str(len(x)+1),str(len(x)+2),str(len(x)+3)]
    )

RAGResults=collection.query(
    query_texts=["Dies ist ein Test"],
    n_results=1,
    #where={"source": "USER"}
)
print(RAGResults["metadatas"][0][0]["prompt"])
x=collection.get(where_document={"$contains":"Blogbeitrag"},include=["metadatas"])['metadatas'][0]['prompt']
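
# A minimal sketch (not part of the original commit) of how a further genre could be
# registered; the document text, the "prompt" metadata, and the test query below are
# hypothetical examples, while the collection calls mirror the ones used above.
if(False):
    x=collection.get(include=[])["ids"]
    collection.add(
        documents=["Ich möchte eine Pressemitteilung"],  # hypothetical genre trigger
        metadatas=[{"prompt": "Bitte schreibe eine Pressemitteilung zur Anfrage des Users!"}],  # hypothetical addendum
        ids=[str(len(x)+1)]
    )
    print(collection.query(query_texts=["Pressemitteilung zu unserem Projekt"], n_results=1)["documents"])
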
# Model
#-------
onPrem=False  # force HF-hub inference; set to True to run the GGUF model via a local llama.cpp server
myModel="mistralai/Mixtral-8x7B-Instruct-v0.1"
if(onPrem==False):
    modelPath=myModel
    from huggingface_hub import InferenceClient
    import gradio as gr
    client = InferenceClient(
        model=modelPath,
        #token="hf_..."
    )
else:
    import os
    import requests
    import subprocess
    #modelPath="/home/af/gguf/models/c4ai-command-r-v01-Q4_0.gguf"
    #modelPath="/home/af/gguf/models/Discolm_german_7b_v1.Q4_0.gguf"
    modelPath="/home/af/gguf/models/Mixtral-8x7b-instruct-v0.1.Q4_0.gguf"
    if(os.path.exists(modelPath)==False):
        #url="https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF/resolve/main/discolm_german_7b_v1.Q4_0.gguf?download=true"
        url="https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
        response = requests.get(url, stream=True)  # stream to disk; the file is too large to hold in memory
        with open("./Mixtral-8x7b-instruct.gguf", mode="wb") as file:
            for chunk in response.iter_content(chunk_size=1024*1024):
                file.write(chunk)
        print("Model downloaded")
        modelPath="./Mixtral-8x7b-instruct.gguf"
    print(modelPath)
    n="20"
    if("Mixtral-8x7b-instruct" in modelPath): n="0"  # GPU offloading causes problems with Mixtral here, so keep all layers on the CPU
    command = ["python3", "-m", "llama_cpp.server", "--model", modelPath, "--host", "0.0.0.0", "--port", "2600", "--n_threads", "8", "--n_gpu_layers", n]
    subprocess.Popen(command)
    print("Server ready!")

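# A minimal sketch (not in the original commit) of waiting until the local llama.cpp
# server actually accepts requests before the GUI starts; it polls the OpenAI-compatible
# /v1/models endpoint that llama_cpp.server exposes. Retry count and delay are arbitrary.
if(False):
    import time
    import requests
    for _ in range(60):
        try:
            if(requests.get("http://0.0.0.0:2600/v1/models", timeout=2).status_code==200):
                print("Server is up!")
                break
        except requests.exceptions.RequestException:
            pass
        time.sleep(5)  # the server may still be loading the model
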
# Check template
#----------------
if(False):
    from transformers import AutoTokenizer
    #mod="mistralai/Mixtral-8x22B-Instruct-v0.1"
    #mod="mistralai/Mixtral-8x7b-instruct-v0.1"
    mod="VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct"
    tok=AutoTokenizer.from_pretrained(mod) #,token="hf_...")
    cha=[{"role":"system","content":"A"},{"role":"user","content":"B"},{"role":"assistant","content":"C"}]
    res=tok.apply_chat_template(cha)
    print(tok.decode(res))
    cha=[{"role":"user","content":"U1"},{"role":"assistant","content":"A1"},{"role":"user","content":"U2"},{"role":"assistant","content":"A2"}]
    res=tok.apply_chat_template(cha)
    print(tok.decode(res))

# Gradio-GUI
#------------
import re
def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=None, zeichenlimit=None, historylimit=4, removeHTML=True):
    startOfString=""
    if zeichenlimit is None: zeichenlimit=1000000000  # effectively unlimited characters per message
    template0=" [INST]{system}\n [/INST] </s>"
    template1=" [INST] {message} [/INST]"
    template2=" {response}</s>"
    if("command-r" in modelPath): #https://huggingface.co/CohereForAI/c4ai-command-r-v01
        ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
        template0="<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|> {system}<|END_OF_TURN_TOKEN|>"
        template1="<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{message}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
        template2="{response}<|END_OF_TURN_TOKEN|>"
    if("Gemma-" in modelPath): #https://huggingface.co/google/gemma-7b-it
        template0="<start_of_turn>user{system}<end_of_turn>"
        template1="<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
        template2="{response}<end_of_turn>"
    if("Mixtral-8x22B-Instruct" in modelPath): # AutoTokenizer: <s>[INST] U1[/INST] A1</s>[INST] U2[/INST] A2</s>
        startOfString="<s>"
        template0="[INST]{system}\n [/INST] </s>"
        template1="[INST] {message}[/INST]"
        template2=" {response}</s>"
    if("Mixtral-8x7b-instruct" in modelPath): #https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
        startOfString="<s>" # AutoTokenizer: <s> [INST] U1 [/INST]A1</s> [INST] U2 [/INST]A2</s>
        template0=" [INST]{system}\n [/INST] </s>"
        template1=" [INST] {message} [/INST]"
        template2=" {response}</s>"
    if("Mistral-7B-Instruct" in modelPath): #https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
        startOfString="<s>"
        template0="[INST]{system}\n [/INST]</s>"
        template1="[INST] {message} [/INST]"
        template2=" {response}</s>"
    if("Openchat-3.5" in modelPath): #https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
        template0="GPT4 Correct User: {system}<|end_of_turn|>GPT4 Correct Assistant: Okay.<|end_of_turn|>"
        template1="GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant: "
        template2="{response}<|end_of_turn|>"
    if(("Discolm_german_7b" in modelPath) or ("SauerkrautLM-7b-HerO" in modelPath)): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
        template0="<|im_start|>system\n{system}<|im_end|>\n"
        template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
        template2="{response}<|im_end|>\n"
    if("Llama-3-SauerkrautLM-8b-Instruct" in modelPath): #https://huggingface.co/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct
        template0="<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|>"
        template1="<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
        template2="{response}<|eot_id|>\n"
    if("WizardLM-13B-V1.2" in modelPath): #https://huggingface.co/WizardLM/WizardLM-13B-V1.2
        template0="{system} " #<s>
        template1="USER: {message} ASSISTANT: "
        template2="{response}</s>"
    if("Phi-2" in modelPath): #https://huggingface.co/TheBloke/phi-2-GGUF
        template0="Instruct: {system}\nOutput: Okay.\n"
        template1="Instruct: {message}\nOutput:"
        template2="{response}\n"
    prompt = ""
    if RAGAddon is not None:
        system += RAGAddon
    if system is not None:
        prompt += template0.format(system=system)
    if history is not None:
        for user_message, bot_response in history[-historylimit:]:
            if user_message is None: user_message = ""
            if bot_response is None: bot_response = ""
            bot_response = re.sub("\n\n<details>((.|\n)*?)</details>","", bot_response)  # remove RAG components
            if removeHTML==True: bot_response = re.sub("<(.*?)>","\n", bot_response)  # remove HTML components in general (may cause bugs with Markdown rendering)
            prompt += template1.format(message=user_message[:zeichenlimit])
            prompt += template2.format(response=bot_response[:zeichenlimit])
    if message is not None: prompt += template1.format(message=message[:zeichenlimit])
    if system2 is not None:
        prompt += system2
    return startOfString+prompt

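# A minimal sketch (not in the original commit) of what extend_prompt assembles for the
# default Mixtral-style template above; the message, history, and system values are made up.
if(False):
    demo=extend_prompt(
        message="Bitte einen Social Media Beitrag zu unserem neuen Projekt",
        history=[("Hallo","Hallo! Wie kann ich helfen?")],
        system="Du bist Schreibassistent.")
    print(demo)  # roughly: " [INST]Du bist Schreibassistent.\n [/INST] </s> [INST] Hallo [/INST] Hallo! ...</s>" followed by the new message
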
import gradio as gr
import requests
import json
from datetime import datetime
import os
import re

def response(message, history, customSysPrompt, genre, hfToken):
    infClient=client  # default InferenceClient defined above (onPrem==False)
    if((onPrem==False) and (hfToken.startswith("hf_"))):  # use HF-hub with a custom token if one is provided
        from huggingface_hub import InferenceClient
        infClient = InferenceClient(
            model=myModel,
            token=hfToken
        )
    removeHTML=True
    system=customSysPrompt  # the system prompt can be changed in the UI (defaults to the prompt below)
    if(system==""): system="Du bist wissenschaftlicher Mitarbeiter an einem Forschungsinstitut und zuständig für die Wissenschaftskommunikation."
    message=message.replace("[INST]","")  # strip template tokens from the user input
    message=message.replace("[/INST]","")
    message=message.replace("</s>","")
    message=re.sub("<[|](im_start|im_end|end_of_turn)[|]>", '', message)
    rag=None  # genre-specific addendum to the system prompt, assembled below
    historylimit=2
    if(genre==""):  # if no genre is selected, use RAG to pick one
        RAGResults=collection.query(query_texts=[message], n_results=1)
        genre=str(RAGResults['documents'][0][0])  # determine genre based on the best-matching db entry

    rag="\n\n"+collection.get(where_document={"$contains":genre},include=["metadatas"])['metadatas'][0]['prompt']  # genre-specific addendum to the system prompt (RAG)
    if(len(history)>0):
        rag=rag+"\nFalls der User Rückfragen oder Änderungsvorschläge zu deinem Entwurf hat, gehe darauf ein."  # add dialog-specific addendum

    system2=None  # system2 can be used as fictive first words of the AI, which are neither displayed nor stored
    prompt=extend_prompt(
        message,                   # current message of the user
        history,                   # complete history
        system,                    # system prompt
        rag,                       # RAG-component added to the system prompt
        system2,                   # fictive first words of the AI (neither displayed nor stored)
        historylimit=historylimit, # number of past exchanges to consider for the current response
        removeHTML=removeHTML      # remove HTML-components from the history (to prevent bugs in Markdown rendering)
    )
    if(True):  # debug output
        print("\n\nMESSAGE:"+str(message))
        print("\n\nHISTORY:"+str(history))
        print("\n\nSYSTEM:"+str(system))
        print("\n\nRAG:"+str(rag))
        print("\n\nSYSTEM2:"+str(system2))
        print("\n\n*** Prompt:\n"+prompt+"\n***\n\n")

    ## Request response from model
    #------------------------------

    print("AI running on prem!" if(onPrem) else "AI running on HF-hub!")
    if(onPrem==False):
        temperature=float(0.9)
        max_new_tokens=1000
        top_p=0.95
        repetition_penalty=1.0
        if temperature < 1e-2: temperature = 1e-2
        top_p = float(top_p)
        generate_kwargs = dict(
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
            do_sample=True,
            seed=42,
        )
        stream = infClient.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        response = ""
        for text in stream:
            part=text.token.text
            response += part
            if removeHTML==True: response = re.sub("<(.*?)>","\n", response)  # remove HTML components in general (may cause bugs with Markdown rendering)
            yield response

    if(onPrem==True):
        #url="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
        url="http://0.0.0.0:2600/v1/completions"
        body={"prompt":prompt,"max_tokens":None, "echo":"False","stream":"True"}  # e.g. Mixtral-Instruct
        if("Discolm_german_7b" in modelPath): body.update({"stop": ["<|im_end|>"]})  # fix stop-token of DiscoLM
        if("Gemma-" in modelPath): body.update({"stop": ["<|im_end|>","<end_of_turn>"]})  # fix stop-token of Gemma
        response=""
        buffer=""
        for text in requests.post(url, json=body, stream=True):  # iterate over the raw SSE byte chunks
            if buffer is None: buffer=""
            buffer=str("".join(buffer))
            text=text.decode('utf-8')
            if((text.startswith(": ping -")==False) and (len(text.strip("\n\r"))>0)): buffer=buffer+str(text)  # skip keep-alive pings and empty lines
            buffer=buffer.split('"finish_reason": null}]}')  # a complete JSON chunk ends with this string
            if(len(buffer)==1):
                buffer="".join(buffer)  # chunk still incomplete, keep collecting
            if(len(buffer)==2):
                part=buffer[0]+'"finish_reason": null}]}'
                if(part.lstrip('\n\r').startswith("data: ")): part=part.lstrip('\n\r').replace("data: ", "")
                try:
                    part = str(json.loads(part)["choices"][0]["text"])
                    response=response+part
                    buffer=""  # reset buffer
                except Exception as e:
                    print("Exception:"+str(e))
                if removeHTML==True: response = re.sub("<(.*?)>","\n", response)  # remove HTML components in general (may cause bugs with Markdown rendering)
                yield response
    history.append((message, response))  # add the current exchange to the history

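# A minimal sketch (not in the original commit) of a line-based alternative to the
# buffer-splitting above: requests' iter_lines() yields each "data: {...}" event as a
# whole line, so every chunk can be JSON-decoded directly; "data: [DONE]" is the usual
# end-of-stream marker of OpenAI-compatible servers.
if(False):
    def stream_completion(url, body):
        with requests.post(url, json=body, stream=True) as r:
            for line in r.iter_lines():
                if(line.startswith(b"data: ")):
                    data=line[len(b"data: "):]
                    if(data.strip()==b"[DONE]"): break
                    yield json.loads(data)["choices"][0]["text"]
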
val=None
gr.ChatInterface(
    response,
    chatbot=gr.Chatbot(value=val, render_markdown=True),
    title="KI Schreibassistenz (on prem)" if onPrem else "KI Schreibassistenz (HFHub)",
    additional_inputs=[
        gr.Textbox(
            value="Du bist wissenschaftlicher Mitarbeiter an einem Forschungsinstitut und zuständig für die Wissenschaftskommunikation.",
            label="System Prompt"),
        gr.Dropdown(
            ["Blogbeitrag","Gliederungsvorschlag","Social Media Beitrag",""],
            value="Blogbeitrag",
            label="Genre"),
        gr.Textbox(
            value="",
            label="HF_token"),
    ]
).queue().launch(share=True)  #share=False, server_name="0.0.0.0", server_port=7864
print("Interface up and running!")