chore: clean a bit
play_with_endpoint.py  (+7, -17)  CHANGED
@@ -50,7 +50,7 @@ _, X_test, _, Y_test = train_test_split(
     random_state=42,
 )
 
-NB_SAMPLES =
+NB_SAMPLES = 10
 X_test = X_test[:NB_SAMPLES]
 Y_test = Y_test[:NB_SAMPLES]
 
@@ -67,15 +67,15 @@ nb_samples = len(X_test)
 verbose = False
 time_start = time.time()
 duration = 0
-is_first = True
 
 for i in range(nb_samples):
 
     # Quantize the input and encrypt it
     encrypted_inputs = fhemodel_client.quantize_encrypt_serialize(X_test[i].reshape(1, -1))
 
-
-
+    if verbose:
+        print(f"Size of encrypted input: {sys.getsizeof(encrypted_inputs) / 1024 / 1024} megabytes")
+        print(f"Size of keys: {sys.getsizeof(evaluation_keys) / 1024 / 1024} megabytes")
 
     # Prepare the payload, including the evaluation keys which are needed server side
     payload = {
@@ -84,32 +84,22 @@ for i in range(nb_samples):
         "evaluation_keys": to_json(evaluation_keys),
     }
 
-    print(f"{payload=}")
-
     # Run the inference on HF servers
     duration -= time.time()
-    print(f"Starting at {time.time()}")
     encrypted_prediction = query(payload)
-    print(f"Ending at {time.time()}")
     duration += time.time()
 
-
-
-    encrypted_prediction = encrypted_prediction
-
-    if is_first:
-        is_first = False
-        print(f"Size of the payload: {sys.getsizeof(payload)} bytes")
+    encrypted_prediction = from_json(encrypted_prediction)
 
     # Decrypt the result and dequantize
     prediction_proba = fhemodel_client.deserialize_decrypt_dequantize(encrypted_prediction)[0]
     prediction = np.argmax(prediction_proba)
 
     if verbose or True:
-        print(f"for {i}-th input, {prediction=} with expected {
+        print(f"for {i}-th input, {prediction=} with expected {Y_test[i]}")
 
     # Measure accuracy
-    nb_good +=
+    nb_good += Y_test[i] == prediction
 
 print(f"Accuracy on {nb_samples} samples is {nb_good * 1. / nb_samples}")
 print(f"Total time: {time.time() - time_start} seconds")