Update app.py
Browse files
app.py
CHANGED
@@ -4,6 +4,7 @@ from huggingface_hub import hf_hub_download
|
|
4 |
import subprocess
|
5 |
import asyncio
|
6 |
import os
|
|
|
7 |
title = "Apollo-6B-GGUF Run On CPU"
|
8 |
|
9 |
description = """
|
@@ -23,6 +24,12 @@ model_path = "models"
|
|
23 |
model_name = "Apollo-6B-q8_0.gguf"
|
24 |
hf_hub_download(repo_id="FreedomIntelligence/Apollo-6B-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)
|
25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
print("Start the model init process")
|
27 |
model = GPT4All(model_name, model_path, allow_download = False, device="cpu")
|
28 |
print("Finish the model init process")
|
@@ -53,8 +60,6 @@ async def generater(message, history, temperature, top_p, top_k):
|
|
53 |
|
54 |
# Debug: print the final prompt to verify it is assembled correctly
|
55 |
print(f"Final prompt: {prompt}")
|
56 |
-
current_dir = os.path.dirname(os.path.realpath(__file__))
|
57 |
-
main_path = os.path.join(current_dir, 'main')
|
58 |
cmd = [
|
59 |
main_path,
|
60 |
"-m",os.path.join(model_path, model_name),
|
|
|
4 |
import subprocess
|
5 |
import asyncio
|
6 |
import os
|
7 |
+
import stat
|
8 |
title = "Apollo-6B-GGUF Run On CPU"
|
9 |
|
10 |
description = """
|
|
|
24 |
model_name = "Apollo-6B-q8_0.gguf"
|
25 |
hf_hub_download(repo_id="FreedomIntelligence/Apollo-6B-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)
|
26 |
|
27 |
+
|
28 |
+
current_dir = os.path.dirname(os.path.realpath(__file__))
|
29 |
+
main_path = os.path.join(current_dir, 'main')
|
30 |
+
os.chmod(main_path, os.stat(main_path).st_mode | stat.S_IEXEC)
|
31 |
+
|
32 |
+
|
33 |
print("Start the model init process")
|
34 |
model = GPT4All(model_name, model_path, allow_download = False, device="cpu")
|
35 |
print("Finish the model init process")
|
|
|
60 |
|
61 |
# Debug: print the final prompt to verify it is assembled correctly
|
62 |
print(f"Final prompt: {prompt}")
|
|
|
|
|
63 |
cmd = [
|
64 |
main_path,
|
65 |
"-m",os.path.join(model_path, model_name),
|