saifeddinemk committed
Commit • 9e9c793
1 Parent(s): 08e6694

Fixed app v2
app.py
CHANGED
@@ -9,7 +9,7 @@ app = FastAPI()
 try:
     llm = Llama.from_pretrained(
         repo_id="QuantFactory/SecurityLLM-GGUF",
-        filename="SecurityLLM.
+        filename="SecurityLLM.Q8_0.gguf",
     )
 except Exception as e:
     raise RuntimeError(f"Failed to load model: {e}")
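For context, a minimal sketch of how the loaded llm could be served through the FastAPI app created at the top of app.py. The /analyze route, the Query model, and the use of create_chat_completion are assumptions for illustration only; the diff shows just the model-loading block, not the app's actual endpoints.

# Hypothetical sketch; route and request model are assumptions, not taken from the diff.
from fastapi import FastAPI
from llama_cpp import Llama
from pydantic import BaseModel

app = FastAPI()

try:
    # Same loading call as in the commit, with the fixed GGUF filename.
    llm = Llama.from_pretrained(
        repo_id="QuantFactory/SecurityLLM-GGUF",
        filename="SecurityLLM.Q8_0.gguf",
    )
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}")

class Query(BaseModel):
    prompt: str

@app.post("/analyze")  # hypothetical route name
def analyze(query: Query):
    # llama-cpp-python's chat completion API
    result = llm.create_chat_completion(
        messages=[{"role": "user", "content": query.prompt}]
    )
    return {"response": result["choices"][0]["message"]["content"]}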
log.txt
ADDED
The diff for this file is too large to render.
See raw diff
test.py
ADDED
@@ -0,0 +1,9 @@
+from privateai_client import PAIClient
+from privateai_client import request_objects
+
+client = PAIClient(url="https://api.private-ai.com/community", api_key="47e2c6f28a0b42429e3e9dd248236d9c")
+text_request = request_objects.process_text_obj(text=["How are u "])
+response = client.process_text(text_request)
+
+print(text_request.text)
+print(response.processed_text)
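As a usage note: client.process_text sends the request to the Private AI service, and response.processed_text typically holds the de-identified version of the input, so the two print statements compare the original text with the redacted output. The exact redaction behavior depends on the service configuration behind the community endpoint.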