AFischer1985 committed
Commit 7b16373 (parent: 012a610)

Update run.py

Files changed (1): run.py (+14, -18)
run.py CHANGED
@@ -164,20 +164,20 @@ else:
   import os
   import requests
   import subprocess
-  modelPath="/home/af/gguf/models/discolm_german_7b_v1.Q4_0.gguf"
+  modelPath="/home/af/gguf/models/Discolm_german_7b_v1.Q4_0.gguf"
   if(os.path.exists(modelPath)==False):
     #url="https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf"
-    #url="https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
+    url="https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
     #url="https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_0.gguf?download=true"
-    url="https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF/resolve/main/discolm_german_7b_v1.Q4_0.gguf?download=true"
+    #url="https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF/resolve/main/discolm_german_7b_v1.Q4_0.gguf?download=true"
     response = requests.get(url)
-    with open("./model.gguf", mode="wb") as file:
+    with open("./Mixtral-8x7b-instruct.gguf", mode="wb") as file:
       file.write(response.content)
     print("Model downloaded")
-    modelPath="./model.gguf"
+    modelPath="./Mixtral-8x7b-instruct.gguf"
   print(modelPath)
   n="20"
-  if("mixtral-8x7b-instruct" in modelPath): n="0" # mixtral seems to cause problems here...
+  if("Mixtral-8x7b-instruct" in modelPath): n="0" # mixtral seems to cause problems here...
   command = ["python3", "-m", "llama_cpp.server", "--model", modelPath, "--host", "0.0.0.0", "--port", "2600", "--n_threads", "8", "--n_gpu_layers", n]
   subprocess.Popen(command)
   print("Server ready!")
@@ -189,32 +189,28 @@ else:
  def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=None, zeichenlimit=None,historylimit=4): #float("Inf")
    startOfString=""
    if zeichenlimit is None: zeichenlimit=1000000000 # :-)
-   template0=" [INST]{system}\n [/INST] </s>" if onPrem else "<s> [INST] {system} [/INST] </s>"
+   template0=" [INST]{system}\n [/INST] </s>"
    template1=" [INST] {message} [/INST]"
    template2=" {response}</s>"
-   if("gemma-" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+   if("Gemma-" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
      template0="<start_of_turn>user{system}</end_of_turn>"
      template1="<start_of_turn>user{message}</end_of_turn><start_of_turn>model"
      template2="{response}</end_of_turn>"
-   if("mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+   if("Mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
      startOfString="<s>"
-     template0=" [INST]{system}\n [/INST] </s>" if onPrem else "<s> [INST]{system}\n [/INST] </s>"
+     template0=" [INST]{system}\n [/INST] </s>"
      template1=" [INST] {message} [/INST]"
      template2=" {response}</s>"
    if("Mistral-7B-Instruct" in modelPath): #https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
      startOfString="<s>"
-     template0="[INST]{system}\n [/INST]</s>" if onPrem else "<s>[INST]{system}\n [/INST]</s>"
+     template0="[INST]{system}\n [/INST]</s>"
      template1="[INST] {message} [/INST]"
      template2=" {response}</s>"
-   if("openchat-3.5" in modelPath): #https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
+   if("Openchat-3.5" in modelPath): #https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
      template0="GPT4 Correct User: {system}<|end_of_turn|>GPT4 Correct Assistant: Okay.<|end_of_turn|>"
      template1="GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant: "
      template2="{response}<|end_of_turn|>"
-   if("SauerkrautLM-7b-HerO" in modelPath): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
-     template0="<|im_start|>system\n{system}<|im_end|>\n"
-     template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
-     template2="{response}<|im_end|>\n"
-   if("discolm_german_7b" in modelPath): #https://huggingface.co/DiscoResearch/DiscoLM_German_7b_v1
+   if(("Discolm_german_7b" in modelPath) or ("SauerkrautLM-7b-HerO" in modelPath)): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
      template0="<|im_start|>system\n{system}<|im_end|>\n"
      template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
      template2="{response}<|im_end|>\n"
@@ -222,7 +218,7 @@ def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=
    template0="{system} " #<s>
    template1="USER: {message} ASSISTANT: "
    template2="{response}</s>"
-   if("phi-2" in modelPath): #https://huggingface.co/TheBloke/phi-2-GGUF
+   if("Phi-2" in modelPath): #https://huggingface.co/TheBloke/phi-2-GGUF
      template0="Instruct: {system}\nOutput: Okay.\n"
      template1="Instruct: {message}\nOutput:"
      template2="{response}\n"
 
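Once subprocess.Popen has launched llama_cpp.server (first hunk), the model is served over an OpenAI-compatible HTTP API on the configured host and port. A quick smoke test, assuming the server is reachable on localhost:2600 and using the Mixtral-style instruct template selected above:

import requests

payload = {
    "prompt": "<s> [INST] Say hello. [/INST]",  # Mixtral-style instruct prompt
    "max_tokens": 64,
    "temperature": 0.7,
}
# llama_cpp.server exposes an OpenAI-style completions endpoint.
r = requests.post("http://localhost:2600/v1/completions", json=payload, timeout=120)
r.raise_for_status()
print(r.json()["choices"][0]["text"])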