kwabs22
committed on
Commit
•
b781202
1
Parent(s):
7567461
llamacpp renamed all commands to have llama- prefix so old code breaks
Browse files
app.py
CHANGED
@@ -20,7 +20,7 @@ def custom_generate_response_by_api(cust_user_message, prompt_index, prompts_lis
|
|
20 |
#Local gguf model using llama.cpp
|
21 |
def generate_response(user_message): #generate_response_token_by_token
|
22 |
cmd = [
|
23 |
-
"/app/llama.cpp/main", # Path to the executable (pre-rename binary name)
|
24 |
"-m", "/app/llama.cpp/models/stablelm-2-zephyr-1_6b-Q4_0.gguf",
|
25 |
"-p", user_message,
|
26 |
"-n", "400",
|
|
|
20 |
#Local gguf model using llama.cpp
|
21 |
def generate_response(user_message): #generate_response_token_by_token
|
22 |
cmd = [
|
23 |
+
"/app/llama.cpp/llama-cli", # Path to the executable #https://github.com/ggerganov/llama.cpp/pull/7809
|
24 |
"-m", "/app/llama.cpp/models/stablelm-2-zephyr-1_6b-Q4_0.gguf",
|
25 |
"-p", user_message,
|
26 |
"-n", "400",
|