chore: working on the inference with trees
Browse files- play_with_endpoint.py +20 -9
play_with_endpoint.py
CHANGED
@@ -4,9 +4,6 @@ import os, sys
|
|
4 |
|
5 |
from pathlib import Path
|
6 |
|
7 |
-
from sklearn.datasets import make_classification
|
8 |
-
from sklearn.model_selection import train_test_split
|
9 |
-
|
10 |
from concrete.ml.deployment import FHEModelClient
|
11 |
|
12 |
import requests
|
@@ -24,7 +21,7 @@ def from_json(python_object):
|
|
24 |
|
25 |
|
26 |
# TODO: put the right link `API_URL` for your entry point
|
27 |
-
API_URL = "https://
|
28 |
headers = {
|
29 |
"Authorization": "Bearer " + os.environ.get("HF_TOKEN"),
|
30 |
"Content-Type": "application/json",
|
@@ -38,9 +35,24 @@ def query(payload):
|
|
38 |
|
39 |
path_to_model = Path("compiled_model")
|
40 |
|
41 |
-
#
|
42 |
-
|
43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
# Recover parameters for client side
|
46 |
fhemodel_client = FHEModelClient(path_to_model)
|
@@ -60,7 +72,7 @@ is_first = True
|
|
60 |
for i in range(nb_samples):
|
61 |
|
62 |
# Quantize the input and encrypt it
|
63 |
-
encrypted_inputs = fhemodel_client.quantize_encrypt_serialize(
|
64 |
|
65 |
# Prepare the payload, including the evaluation keys which are needed server side
|
66 |
payload = {
|
@@ -94,4 +106,3 @@ print(f"Accuracy on {nb_samples} samples is {nb_good * 1. / nb_samples}")
|
|
94 |
print(f"Total time: {time.time() - time_start} seconds")
|
95 |
print(f"Duration in inferences: {duration} seconds")
|
96 |
print(f"Duration per inference: {duration / nb_samples} seconds")
|
97 |
-
# END: replace this part with your privacy-preserving application
|
|
|
4 |
|
5 |
from pathlib import Path
|
6 |
|
|
|
|
|
|
|
7 |
from concrete.ml.deployment import FHEModelClient
|
8 |
|
9 |
import requests
|
|
|
21 |
|
22 |
|
23 |
# TODO: put the right link `API_URL` for your entry point
|
24 |
+
API_URL = "https://yw1dgyuig6ff5pft.us-east-1.aws.endpoints.huggingface.cloud"
|
25 |
headers = {
|
26 |
"Authorization": "Bearer " + os.environ.get("HF_TOKEN"),
|
27 |
"Content-Type": "application/json",
|
|
|
35 |
|
36 |
path_to_model = Path("compiled_model")
|
37 |
|
38 |
+
# Decision-tree in FHE
|
39 |
+
from sklearn.datasets import fetch_openml
|
40 |
+
from sklearn.model_selection import train_test_split
|
41 |
+
import numpy
|
42 |
+
|
43 |
+
features, classes = fetch_openml(data_id=44, as_frame=False, cache=True, return_X_y=True)
|
44 |
+
classes = classes.astype(numpy.int64)
|
45 |
+
|
46 |
+
_, X_test, _, Y_test = train_test_split(
|
47 |
+
features,
|
48 |
+
classes,
|
49 |
+
test_size=0.15,
|
50 |
+
random_state=42,
|
51 |
+
)
|
52 |
+
|
53 |
+
NB_SAMPLES = 2
|
54 |
+
X_test = X_test[:NB_SAMPLES]
|
55 |
+
Y_test = Y_test[:NB_SAMPLES]
|
56 |
|
57 |
# Recover parameters for client side
|
58 |
fhemodel_client = FHEModelClient(path_to_model)
|
|
|
72 |
for i in range(nb_samples):
|
73 |
|
74 |
# Quantize the input and encrypt it
|
75 |
+
encrypted_inputs = fhemodel_client.quantize_encrypt_serialize(X_test[i].reshape(1, -1))
|
76 |
|
77 |
# Prepare the payload, including the evaluation keys which are needed server side
|
78 |
payload = {
|
|
|
106 |
print(f"Total time: {time.time() - time_start} seconds")
|
107 |
print(f"Duration in inferences: {duration} seconds")
|
108 |
print(f"Duration per inference: {duration / nb_samples} seconds")
|
|