Jingyuan-Zhu
committed on
added test and lag
Browse files- kaggle_evaluation/__init__.py +22 -0
- kaggle_evaluation/core/__init__.py +5 -0
- kaggle_evaluation/core/base_gateway.py +195 -0
- kaggle_evaluation/core/generated/__init__.py +5 -0
- kaggle_evaluation/core/generated/kaggle_evaluation_pb2.py +44 -0
- kaggle_evaluation/core/generated/kaggle_evaluation_pb2_grpc.py +66 -0
- kaggle_evaluation/core/kaggle_evaluation.proto +66 -0
- kaggle_evaluation/core/relay.py +335 -0
- kaggle_evaluation/core/templates.py +143 -0
- kaggle_evaluation/jane_street_gateway.py +57 -0
- kaggle_evaluation/jane_street_inference_server.py +9 -0
- lags.parquet/.DS_Store +3 -0
- lags.parquet/date_id=0/part-0.parquet +3 -0
- test.parquet/.DS_Store +3 -0
- test.parquet/date_id=0/part-0.parquet +3 -0
kaggle_evaluation/__init__.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
'''
Module implementing generic communication patterns with Python in / Python out
supporting many (nested) primitives + special data science types like DataFrames
or np.ndarrays, with gRPC + protobuf as a backing implementation.
'''

import os
import sys

# grpc_tools.protoc emits absolute imports for its generated modules, so the
# package and generated directories must themselves be on sys.path.
module_dir = os.path.dirname(os.path.abspath(__file__))
gen_dir = os.path.join(module_dir, 'core', 'generated')

# Fail fast at import time if the protoc outputs were never generated.
if not os.path.exists(os.path.join(gen_dir, 'kaggle_evaluation_pb2.py')):
    print('kaggle evaluation proto and gRPC generated files are missing')
    sys.exit(1)

for _extra_dir in (module_dir, gen_dir):
    sys.path.append(_extra_dir)


__version__ = '0.3.0'
|
kaggle_evaluation/core/__init__.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import sys

# Make this package directory importable as a top-level location so the
# protoc-generated modules (which use absolute imports) can be resolved.
_package_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_package_dir)
|
kaggle_evaluation/core/base_gateway.py
ADDED
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
''' Lower level implementation details of the gateway.
|
2 |
+
Hosts should not need to review this file before writing their competition specific gateway.
|
3 |
+
'''
|
4 |
+
|
5 |
+
import enum
|
6 |
+
import json
|
7 |
+
import os
|
8 |
+
import pathlib
|
9 |
+
import re
|
10 |
+
import subprocess
|
11 |
+
|
12 |
+
from socket import gaierror
|
13 |
+
from typing import List, Optional, Tuple, Union
|
14 |
+
|
15 |
+
import grpc
|
16 |
+
import pandas as pd
|
17 |
+
import polars as pl
|
18 |
+
|
19 |
+
import kaggle_evaluation.core.relay
|
20 |
+
|
21 |
+
|
22 |
+
# Directory under which the gateway mirrors competition files for the inference server.
_FILE_SHARE_DIR = '/kaggle/shared/'
# True only inside a live Kaggle competition rerun session (env var set by the platform).
IS_RERUN = os.getenv('KAGGLE_IS_COMPETITION_RERUN') is not None
|
24 |
+
|
25 |
+
|
26 |
+
class GatewayRuntimeErrorType(enum.Enum):
    ''' Allow-listed error types that Gateways can raise, which map to canned error messages to show users.

    The integer values are persisted to result.json (see BaseGateway.write_result),
    so existing values must remain stable.
    '''
    UNSPECIFIED = 0
    # The inference server was never reachable (see BaseGateway.handle_server_error).
    SERVER_NEVER_STARTED = 1
    SERVER_CONNECTION_FAILED = 2
    # The user's inference code raised while handling a request.
    SERVER_RAISED_EXCEPTION = 3
    # The server is running but did not register a listener for the called endpoint.
    SERVER_MISSING_ENDPOINT = 4
    # Default error type if an exception was raised that was not explicitly handled by the Gateway
    GATEWAY_RAISED_EXCEPTION = 5
    INVALID_SUBMISSION = 6
|
36 |
+
|
37 |
+
|
38 |
+
class GatewayRuntimeError(Exception):
    ''' Gateways can raise this error to capture a user-visible error enum from above and host-visible error details.

    Args:
        error_type: Allow-listed error category surfaced to the competitor.
        error_details: Optional host-visible diagnostic string.
    '''
    def __init__(self, error_type: GatewayRuntimeErrorType, error_details: Optional[str]=None):
        # BUGFIX: forward to Exception.__init__ so str()/repr() and pickling carry the
        # error information; previously the exception message was always empty.
        super().__init__(error_type, error_details)
        self.error_type = error_type
        self.error_details = error_details
|
43 |
+
|
44 |
+
|
45 |
+
class BaseGateway():
    ''' Base class for competition-specific gateways.

    Owns the client connection to the user's inference server and provides shared
    helpers for validating prediction batches, sharing files read-only with the
    inference server, and writing the submission.parquet / result.json outputs.
    '''
    def __init__(self, data_paths: Optional[Tuple[str, ...]]=None, file_share_dir: str=_FILE_SHARE_DIR):
        # During a competition rerun the inference server runs on a separate host named 'inference_server'.
        self.client = kaggle_evaluation.core.relay.Client('inference_server' if IS_RERUN else 'localhost')
        self.server = None  # The gateway can have a server but it isn't typically necessary.
        self.file_share_dir = file_share_dir
        self.data_paths = data_paths

    def validate_prediction_batch(
            self,
            prediction_batch: Union[pd.DataFrame, pl.DataFrame],
            sample_submission_batch: Union[pd.DataFrame, pl.DataFrame]):
        ''' If competitors can submit fewer rows than expected they can save all predictions for the last batch and
        bypass the benefits of the Kaggle evaluation service. This attack was seen in a real competition with the older time series API:
        https://www.kaggle.com/competitions/riiid-test-answer-prediction/discussion/196066
        It's critically important that this check be run every time predict() is called.

        Raises:
            GatewayRuntimeError (INVALID_SUBMISSION) if the batch is missing, has the
            wrong row count, or its row IDs don't match the sample submission batch.
        '''
        if prediction_batch is None:
            raise GatewayRuntimeError(GatewayRuntimeErrorType.INVALID_SUBMISSION, 'No prediction received')
        if len(prediction_batch) != len(sample_submission_batch):
            raise GatewayRuntimeError(
                GatewayRuntimeErrorType.INVALID_SUBMISSION,
                f'Invalid predictions: expected {len(sample_submission_batch)} rows but received {len(prediction_batch)}'
            )

        # The row ID is taken to be the first column of the sample submission batch.
        ROW_ID_COLUMN_INDEX = 0
        row_id_colname = sample_submission_batch.columns[ROW_ID_COLUMN_INDEX]
        # Prevent frame shift attacks that could be performed if the IDs are predictable.
        # Ensure both dataframes are in Polars for efficient comparison.
        if row_id_colname not in prediction_batch.columns:
            raise GatewayRuntimeError(GatewayRuntimeErrorType.INVALID_SUBMISSION, f'Prediction missing column {row_id_colname}')
        if not pl.Series(prediction_batch[row_id_colname]).equals(pl.Series(sample_submission_batch[row_id_colname])):
            raise GatewayRuntimeError(
                GatewayRuntimeErrorType.INVALID_SUBMISSION,
                f'Invalid values for {row_id_colname} in batch of predictions.'
            )

    def _standardize_and_validate_paths(
            self,
            input_paths: List[Union[str, pathlib.Path]]
        ) -> Tuple[List[str], List[str]]:
        ''' Validate the paths to be shared and standardize them to absolute path strings.

        Returns:
            A tuple of (absolute input paths, matching output paths under self.file_share_dir).

        Raises:
            ValueError for wrongly typed, non-normalized, nonexistent or duplicate paths,
            or if self.file_share_dir is not an existing directory.
        '''
        # Accept a list of str or pathlib.Path, but standardize on list of str
        for path in input_paths:
            # Check the type first so the str() based checks below operate on valid input.
            if type(path) not in (pathlib.Path, str):
                raise ValueError('All paths must be of type str or pathlib.Path')
            if os.pardir in str(path):
                raise ValueError(f'Send files path contains {os.pardir}: {path}')
            if str(path) != str(os.path.normpath(path)):
                # Raise an error rather than sending users unexpectedly altered paths
                raise ValueError(f'Send files path {path} must be normalized. See `os.path.normpath`')
            if not os.path.exists(path):
                raise ValueError(f'Input path {path} does not exist')

        input_paths = [os.path.abspath(path) for path in input_paths]
        if len(set(input_paths)) != len(input_paths):
            raise ValueError('Duplicate input paths found')

        if not os.path.exists(self.file_share_dir) or not os.path.isdir(self.file_share_dir):
            raise ValueError(f'Invalid output directory {self.file_share_dir}')

        # BUGFIX: output_dir was previously only assigned when file_share_dir lacked a trailing
        # separator, so the default '/kaggle/shared/' raised NameError below. Strip any trailing
        # separator so concatenating an absolute input path yields exactly one separator,
        # e.g. '/kaggle/shared' + '/kaggle/input/mycomp/test.csv'.
        output_dir = self.file_share_dir.rstrip(os.path.sep)
        # Can't use os.path.join for output_dir + path: os.path.join won't prepend to an abspath
        output_paths = [output_dir + path for path in input_paths]
        return input_paths, output_paths

    def share_files(
            self,
            input_paths: List[Union[str, pathlib.Path]],
        ) -> List[str]:
        ''' Makes files and/or directories available to the user's inference_server. They will be mirrored under the
        self.file_share_dir directory, using the full absolute path. An input like:
            /kaggle/input/mycomp/test.csv
        Would be written to:
            /kaggle/shared/kaggle/input/mycomp/test.csv

        Args:
            input_paths: List of paths to files and/or directories that should be shared.

        Returns:
            The output paths that were shared.

        Raises:
            ValueError if any invalid paths are passed.
        '''
        input_paths, output_paths = self._standardize_and_validate_paths(input_paths)
        for in_path, out_path in zip(input_paths, output_paths):
            os.makedirs(os.path.dirname(out_path), exist_ok=True)

            # This makes the files available to the InferenceServer as read-only. Only the Gateway can mount files.
            # mount will only work in live kaggle evaluation rerun sessions. Otherwise use a symlink.
            if IS_RERUN:
                if not os.path.isdir(out_path):
                    # `mount --bind` on a file requires an existing mount point.
                    pathlib.Path(out_path).touch()
                # SECURITY/ROBUSTNESS FIX: pass an argument list with shell=False (the default)
                # so paths containing spaces or shell metacharacters can't break or alter the command.
                subprocess.run(['mount', '--bind', in_path, out_path], check=True)
            else:
                subprocess.run(['ln', '-s', in_path, out_path], check=True)

        return output_paths

    def write_submission(self, predictions):
        ''' Export the predictions to a submission.parquet.

        Accepts a pandas or polars DataFrame, or a list of either (which is
        concatenated first).

        Raises:
            GatewayRuntimeError (INVALID_SUBMISSION) if polars batches have inconsistent schemas.
            ValueError for unsupported prediction types.
        '''
        if isinstance(predictions, list):
            if isinstance(predictions[0], pd.DataFrame):
                predictions = pd.concat(predictions, ignore_index=True)
            elif isinstance(predictions[0], pl.DataFrame):
                try:
                    predictions = pl.concat(predictions, how='vertical_relaxed')
                except pl.exceptions.SchemaError:
                    raise GatewayRuntimeError(GatewayRuntimeErrorType.INVALID_SUBMISSION, 'Inconsistent prediction types')
                except pl.exceptions.ComputeError:
                    raise GatewayRuntimeError(GatewayRuntimeErrorType.INVALID_SUBMISSION, 'Inconsistent prediction column counts')

        if isinstance(predictions, pd.DataFrame):
            predictions.to_parquet('submission.parquet', index=False)
        elif isinstance(predictions, pl.DataFrame):
            pl.DataFrame(predictions).write_parquet('submission.parquet')
        else:
            raise ValueError(f"Unsupported predictions type {type(predictions)}; can't write submission file")

    def write_result(self, error: Optional[GatewayRuntimeError]=None):
        ''' Export a result.json containing error details if applicable.'''
        result = { 'Succeeded': error is None }

        if error is not None:
            result['ErrorType'] = error.error_type.value
            result['ErrorName'] = error.error_type.name
            # Max error detail length is 8000
            result['ErrorDetails'] = str(error.error_details[:8000]) if error.error_details else None

        with open('result.json', 'w') as f_open:
            json.dump(result, f_open)

    def handle_server_error(self, exception: Exception, endpoint: str):
        ''' Determine how to handle an exception raised when calling the inference server. Typically just format the
        error into a GatewayRuntimeError and raise.

        Args:
            exception: The exception raised while calling the server.
            endpoint: Name of the endpoint that was being called.

        Raises:
            GatewayRuntimeError with an allow-listed error type when the failure mode is
            recognized; otherwise re-raises the original exception.
        '''
        exception_str = str(exception)
        if isinstance(exception, gaierror) or (isinstance(exception, RuntimeError) and 'Failed to connect to server after waiting' in exception_str):
            raise GatewayRuntimeError(GatewayRuntimeErrorType.SERVER_NEVER_STARTED) from None
        if f'No listener for {endpoint} was registered' in exception_str:
            raise GatewayRuntimeError(GatewayRuntimeErrorType.SERVER_MISSING_ENDPOINT, f'Server did not register a listener for {endpoint}') from None
        if 'Exception calling application' in exception_str:
            # Extract just the exception message raised by the inference server
            message_match = re.search('"Exception calling application: (.*)"', exception_str, re.IGNORECASE)
            message = message_match.group(1) if message_match else exception_str
            raise GatewayRuntimeError(GatewayRuntimeErrorType.SERVER_RAISED_EXCEPTION, message) from None
        # NOTE(review): relies on grpc's private _InactiveRpcError; revisit if grpc is upgraded.
        if isinstance(exception, grpc._channel._InactiveRpcError):
            raise GatewayRuntimeError(GatewayRuntimeErrorType.SERVER_CONNECTION_FAILED, exception_str) from None

        raise exception
|
kaggle_evaluation/core/generated/__init__.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import sys

# Ensure this directory is importable directly: the protoc-generated gRPC module
# imports its sibling pb2 module by bare name.
_generated_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_generated_dir)
|
kaggle_evaluation/core/generated/kaggle_evaluation_pb2.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kaggle_evaluation.proto
# Protobuf Python Version: 4.25.1
# To change the schema, edit kaggle_evaluation.proto and regenerate with grpc_tools.protoc.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17kaggle_evaluation.proto\x12\x18kaggle_evaluation_client\"\xf9\x01\n\x17KaggleEvaluationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\x04\x61rgs\x18\x02 \x03(\x0b\x32!.kaggle_evaluation_client.Payload\x12M\n\x06kwargs\x18\x03 \x03(\x0b\x32=.kaggle_evaluation_client.KaggleEvaluationRequest.KwargsEntry\x1aP\n\x0bKwargsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.kaggle_evaluation_client.Payload:\x02\x38\x01\"N\n\x18KaggleEvaluationResponse\x12\x32\n\x07payload\x18\x01 \x01(\x0b\x32!.kaggle_evaluation_client.Payload\"\x8d\x04\n\x07Payload\x12\x13\n\tstr_value\x18\x01 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x02 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x03 \x01(\x12H\x00\x12\x15\n\x0b\x66loat_value\x18\x04 \x01(\x02H\x00\x12\x14\n\nnone_value\x18\x05 \x01(\x08H\x00\x12;\n\nlist_value\x18\x06 \x01(\x0b\x32%.kaggle_evaluation_client.PayloadListH\x00\x12<\n\x0btuple_value\x18\x07 \x01(\x0b\x32%.kaggle_evaluation_client.PayloadListH\x00\x12:\n\ndict_value\x18\x08 \x01(\x0b\x32$.kaggle_evaluation_client.PayloadMapH\x00\x12 \n\x16pandas_dataframe_value\x18\t \x01(\x0cH\x00\x12 \n\x16polars_dataframe_value\x18\n \x01(\x0cH\x00\x12\x1d\n\x13pandas_series_value\x18\x0b \x01(\x0cH\x00\x12\x1d\n\x13polars_series_value\x18\x0c \x01(\x0cH\x00\x12\x1b\n\x11numpy_array_value\x18\r \x01(\x0cH\x00\x12\x1c\n\x12numpy_scalar_value\x18\x0e \x01(\x0cH\x00\x12\x18\n\x0e\x62ytes_io_value\x18\x0f \x01(\x0cH\x00\x42\x07\n\x05value\"B\n\x0bPayloadList\x12\x33\n\x08payloads\x18\x01 \x03(\x0b\x32!.kaggle_evaluation_client.Payload\"\xad\x01\n\nPayloadMap\x12I\n\x0bpayload_map\x18\x01 \x03(\x0b\x32\x34.kaggle_evaluation_client.PayloadMap.PayloadMapEntry\x1aT\n\x0fPayloadMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.kaggle_evaluation_client.Payload:\x02\x38\x01\x32\x8a\x01\n\x17KaggleEvaluationService\x12o\n\x04Send\x12\x31.kaggle_evaluation_client.KaggleEvaluationRequest\x1a\x32.kaggle_evaluation_client.KaggleEvaluationResponse\"\x00\x62\x06proto3')

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'kaggle_evaluation_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _globals['_KAGGLEEVALUATIONREQUEST_KWARGSENTRY']._options = None
  _globals['_KAGGLEEVALUATIONREQUEST_KWARGSENTRY']._serialized_options = b'8\001'
  _globals['_PAYLOADMAP_PAYLOADMAPENTRY']._options = None
  _globals['_PAYLOADMAP_PAYLOADMAPENTRY']._serialized_options = b'8\001'
  _globals['_KAGGLEEVALUATIONREQUEST']._serialized_start=54
  _globals['_KAGGLEEVALUATIONREQUEST']._serialized_end=303
  _globals['_KAGGLEEVALUATIONREQUEST_KWARGSENTRY']._serialized_start=223
  _globals['_KAGGLEEVALUATIONREQUEST_KWARGSENTRY']._serialized_end=303
  _globals['_KAGGLEEVALUATIONRESPONSE']._serialized_start=305
  _globals['_KAGGLEEVALUATIONRESPONSE']._serialized_end=383
  _globals['_PAYLOAD']._serialized_start=386
  _globals['_PAYLOAD']._serialized_end=911
  _globals['_PAYLOADLIST']._serialized_start=913
  _globals['_PAYLOADLIST']._serialized_end=979
  _globals['_PAYLOADMAP']._serialized_start=982
  _globals['_PAYLOADMAP']._serialized_end=1155
  _globals['_PAYLOADMAP_PAYLOADMAPENTRY']._serialized_start=1071
  _globals['_PAYLOADMAP_PAYLOADMAPENTRY']._serialized_end=1155
  _globals['_KAGGLEEVALUATIONSERVICE']._serialized_start=1158
  _globals['_KAGGLEEVALUATIONSERVICE']._serialized_end=1296
# @@protoc_insertion_point(module_scope)
|
kaggle_evaluation/core/generated/kaggle_evaluation_pb2_grpc.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
# To change the service, edit kaggle_evaluation.proto and regenerate with grpc_tools.protoc.
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

import kaggle_evaluation_pb2 as kaggle__evaluation__pb2


class KaggleEvaluationServiceStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.Send = channel.unary_unary(
                '/kaggle_evaluation_client.KaggleEvaluationService/Send',
                request_serializer=kaggle__evaluation__pb2.KaggleEvaluationRequest.SerializeToString,
                response_deserializer=kaggle__evaluation__pb2.KaggleEvaluationResponse.FromString,
                )


class KaggleEvaluationServiceServicer(object):
    """Missing associated documentation comment in .proto file."""

    def Send(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_KaggleEvaluationServiceServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'Send': grpc.unary_unary_rpc_method_handler(
                    servicer.Send,
                    request_deserializer=kaggle__evaluation__pb2.KaggleEvaluationRequest.FromString,
                    response_serializer=kaggle__evaluation__pb2.KaggleEvaluationResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'kaggle_evaluation_client.KaggleEvaluationService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class KaggleEvaluationService(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def Send(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/kaggle_evaluation_client.KaggleEvaluationService/Send',
            kaggle__evaluation__pb2.KaggleEvaluationRequest.SerializeToString,
            kaggle__evaluation__pb2.KaggleEvaluationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
kaggle_evaluation/core/kaggle_evaluation.proto
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Defines the proto service for KaggleEvaluation communication, aiming to provide native
// support for passing a variety of python primitives + common data science
// objects, and nested objects thereof.

syntax = "proto3";

package kaggle_evaluation_client;

// Single generic RPC: the gateway sends a named call with args/kwargs and the
// inference server answers with one Payload.
service KaggleEvaluationService {
  rpc Send(KaggleEvaluationRequest) returns (KaggleEvaluationResponse) {};
}

message KaggleEvaluationRequest {
  // Name of the endpoint/method being invoked on the receiving side.
  string name = 1;
  // Support generic python method calls using standard args / kwargs format.
  repeated Payload args = 2;
  map<string, Payload> kwargs = 3;
}

message KaggleEvaluationResponse {
  Payload payload = 1;
}

// Core object representing a python value.
message Payload {
  oneof value {
    // Primitives
    string str_value = 1;
    bool bool_value = 2;
    sint64 int_value = 3;
    float float_value = 4;
    // Value is ignored, being set at all means `None`
    bool none_value = 5;

    // Iterables for nested types
    PayloadList list_value = 6;
    PayloadList tuple_value = 7;
    // Only supports dict with keys of type str and values that are serializable
    // to Payload as well.
    PayloadMap dict_value = 8;

    // Allowlisted special types, serialized to bytes by relay.py
    // pandas.DataFrame
    bytes pandas_dataframe_value = 9;
    // polars.DataFrame
    bytes polars_dataframe_value = 10;
    // pandas.Series
    bytes pandas_series_value = 11;
    // polars.Series
    bytes polars_series_value = 12;
    // numpy.ndarray
    bytes numpy_array_value = 13;
    // numpy.scalar. Distinct from numpy.ndarray to avoid issues with dimensionless numpy arrays
    bytes numpy_scalar_value = 14;
    // io.BytesIO
    bytes bytes_io_value = 15;
  }
}

message PayloadList {
  repeated Payload payloads = 1;
}

message PayloadMap {
  map<string, Payload> payload_map = 1;
}
|
kaggle_evaluation/core/relay.py
ADDED
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
Core implementation of the client module, implementing generic communication
|
3 |
+
patterns with Python in / Python out supporting many (nested) primitives +
|
4 |
+
special data science types like DataFrames or np.ndarrays, with gRPC + protobuf
|
5 |
+
as a backing implementation.
|
6 |
+
'''
|
7 |
+
|
8 |
+
import grpc
|
9 |
+
import io
|
10 |
+
import json
|
11 |
+
import socket
|
12 |
+
import time
|
13 |
+
|
14 |
+
from concurrent import futures
|
15 |
+
from typing import Callable, List, Tuple
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import pandas as pd
|
19 |
+
import polars as pl
|
20 |
+
import pyarrow
|
21 |
+
|
22 |
+
import kaggle_evaluation.core.generated.kaggle_evaluation_pb2 as kaggle_evaluation_proto
|
23 |
+
import kaggle_evaluation.core.generated.kaggle_evaluation_pb2_grpc as kaggle_evaluation_grpc
|
24 |
+
|
25 |
+
|
26 |
+
_SERVICE_CONFIG = {
|
27 |
+
# Service config proto: https://github.com/grpc/grpc-proto/blob/ec886024c2f7b7f597ba89d5b7d60c3f94627b17/grpc/service_config/service_config.proto#L377
|
28 |
+
'methodConfig': [
|
29 |
+
{
|
30 |
+
'name': [{}], # Applies to all methods
|
31 |
+
# See retry policy docs: https://grpc.io/docs/guides/retry/
|
32 |
+
'retryPolicy': {
|
33 |
+
'maxAttempts': 5,
|
34 |
+
'initialBackoff': '0.1s',
|
35 |
+
'maxBackoff': '1s',
|
36 |
+
'backoffMultiplier': 1, # Ensure relatively rapid feedback in the event of a crash
|
37 |
+
'retryableStatusCodes': ['UNAVAILABLE'],
|
38 |
+
},
|
39 |
+
}
|
40 |
+
]
|
41 |
+
}
|
42 |
+
_GRPC_PORT = 50051
|
43 |
+
_GRPC_CHANNEL_OPTIONS = [
|
44 |
+
# -1 for unlimited message send/receive size
|
45 |
+
# https://github.com/grpc/grpc/blob/v1.64.x/include/grpc/impl/channel_arg_names.h#L39
|
46 |
+
('grpc.max_send_message_length', -1),
|
47 |
+
('grpc.max_receive_message_length', -1),
|
48 |
+
# https://github.com/grpc/grpc/blob/master/doc/keepalive.md
|
49 |
+
('grpc.keepalive_time_ms', 60_000), # Time between heartbeat pings
|
50 |
+
('grpc.keepalive_timeout_ms', 5_000), # Time allowed to respond to pings
|
51 |
+
('grpc.http2.max_pings_without_data', 0), # Remove another cap on pings
|
52 |
+
('grpc.keepalive_permit_without_calls', 1), # Allow heartbeat pings at any time
|
53 |
+
('grpc.http2.min_ping_interval_without_data_ms', 1_000),
|
54 |
+
('grpc.service_config', json.dumps(_SERVICE_CONFIG)),
|
55 |
+
]
|
56 |
+
|
57 |
+
|
58 |
+
DEFAULT_DEADLINE_SECONDS = 60 * 60
|
59 |
+
_RETRY_SLEEP_SECONDS = 1
|
60 |
+
# Enforce a relatively strict server startup time so users can get feedback quickly if they're not
|
61 |
+
# configuring KaggleEvaluation correctly. We really don't want notebooks timing out after nine hours
|
62 |
+
# somebody forgot to start their inference_server. Slow steps like loading models
|
63 |
+
# can happen during the first inference call if necessary.
|
64 |
+
STARTUP_LIMIT_SECONDS = 60 * 15
|
65 |
+
|
66 |
+
### Utils shared by client and server for data transfer

# pl.Enum is currently unstable, but we should eventually consider supporting it.
# https://docs.pola.rs/api/python/stable/reference/api/polars.datatypes.Enum.html#polars.datatypes.Enum
# Polars dtypes rejected by _serialize before attempting Arrow IPC conversion.
_POLARS_TYPE_DENYLIST = set([pl.Enum, pl.Object, pl.Unknown])
|
71 |
+
|
72 |
+
def _serialize(data) -> kaggle_evaluation_proto.Payload:
    '''Maps input data of one of several allow-listed types to a protobuf message to be sent over gRPC.

    Args:
        data: The input data to be mapped. Supported types: str, bool, int, float, None,
            list, tuple, dict (str keys only), numpy numeric/bool scalars, pd.DataFrame,
            pl.DataFrame, pd.Series, pl.Series, np.ndarray and io.BytesIO.

    Returns:
        The Payload protobuf message.

    Raises:
        TypeError if data is of an unsupported type.
    '''
    # Python primitives and Numpy scalars
    if isinstance(data, np.generic):
        # Numpy functions that return a single number return numpy scalars instead of python primitives.
        # In some cases this difference matters: https://numpy.org/devdocs/release/2.0.0-notes.html#representation-of-numpy-scalars-changed
        # Ex: np.mean(1,2) yields np.float64(1.5) instead of 1.5.
        # Check for numpy scalars first since most of them also inherit from python primitives.
        # For example, `np.float64(1.5)` is an instance of `float` among many other things.
        # https://numpy.org/doc/stable/reference/arrays.scalars.html
        # BUGFIX: these validations were `assert` statements, which raise AssertionError rather
        # than the documented TypeError and are stripped entirely under `python -O`.
        if data.shape != ():
            # The np.generic branch exists solely for scalars
            raise TypeError(f'Unsupported non-scalar numpy value of shape {data.shape} for KaggleEvaluation.')
        if not isinstance(data, (np.number, np.bool_)):
            # No support for numpy bytes, strings, objects, etc
            raise TypeError(f'Unsupported numpy scalar type {type(data)} for KaggleEvaluation.')
        buffer = io.BytesIO()
        np.save(buffer, data, allow_pickle=False)
        return kaggle_evaluation_proto.Payload(numpy_scalar_value=buffer.getvalue())
    elif isinstance(data, str):
        return kaggle_evaluation_proto.Payload(str_value=data)
    elif isinstance(data, bool):  # bool is a subclass of int, so check that first
        return kaggle_evaluation_proto.Payload(bool_value=data)
    elif isinstance(data, int):
        return kaggle_evaluation_proto.Payload(int_value=data)
    elif isinstance(data, float):
        return kaggle_evaluation_proto.Payload(float_value=data)
    elif data is None:
        return kaggle_evaluation_proto.Payload(none_value=True)
    # Iterables for nested types
    if isinstance(data, list):
        return kaggle_evaluation_proto.Payload(list_value=kaggle_evaluation_proto.PayloadList(payloads=map(_serialize, data)))
    elif isinstance(data, tuple):
        return kaggle_evaluation_proto.Payload(tuple_value=kaggle_evaluation_proto.PayloadList(payloads=map(_serialize, data)))
    elif isinstance(data, dict):
        serialized_dict = {}
        for key, value in data.items():
            if not isinstance(key, str):
                raise TypeError(f'KaggleEvaluation only supports dicts with keys of type str, found {type(key)}.')
            serialized_dict[key] = _serialize(value)
        return kaggle_evaluation_proto.Payload(dict_value=kaggle_evaluation_proto.PayloadMap(payload_map=serialized_dict))
    # Allowlisted special types
    if isinstance(data, pd.DataFrame):
        buffer = io.BytesIO()
        data.to_parquet(buffer, index=False, compression='lz4')
        return kaggle_evaluation_proto.Payload(pandas_dataframe_value=buffer.getvalue())
    elif isinstance(data, pl.DataFrame):
        data_types = set(i.base_type() for i in data.dtypes)
        banned_types = _POLARS_TYPE_DENYLIST.intersection(data_types)
        if len(banned_types) > 0:
            raise TypeError(f'Unsupported Polars data type(s): {banned_types}')

        # Polars frames travel as Arrow IPC streams rather than parquet.
        table = data.to_arrow()
        buffer = io.BytesIO()
        with pyarrow.ipc.new_stream(buffer, table.schema, options=pyarrow.ipc.IpcWriteOptions(compression='lz4')) as writer:
            writer.write_table(table)
        return kaggle_evaluation_proto.Payload(polars_dataframe_value=buffer.getvalue())
    elif isinstance(data, pd.Series):
        buffer = io.BytesIO()
        # Can't serialize a pd.Series directly to parquet, must use intermediate DataFrame
        pd.DataFrame(data).to_parquet(buffer, index=False, compression='lz4')
        return kaggle_evaluation_proto.Payload(pandas_series_value=buffer.getvalue())
    elif isinstance(data, pl.Series):
        buffer = io.BytesIO()
        # Can't serialize a pl.Series directly to parquet, must use intermediate DataFrame
        pl.DataFrame(data).write_parquet(buffer, compression='lz4', statistics=False)
        return kaggle_evaluation_proto.Payload(polars_series_value=buffer.getvalue())
    elif isinstance(data, np.ndarray):
        buffer = io.BytesIO()
        np.save(buffer, data, allow_pickle=False)
        return kaggle_evaluation_proto.Payload(numpy_array_value=buffer.getvalue())
    elif isinstance(data, io.BytesIO):
        return kaggle_evaluation_proto.Payload(bytes_io_value=data.getvalue())

    raise TypeError(f'Type {type(data)} not supported for KaggleEvaluation.')
|
153 |
+
|
154 |
+
|
155 |
+
def _deserialize(payload: kaggle_evaluation_proto.Payload):
    '''Map a Payload protobuf message back to the Python value it encodes.

    Args:
        payload: The message to be mapped.

    Returns:
        A value of one of several allow-listed types.

    Raises:
        TypeError if an unexpected value data type is found.
    '''
    # Resolve the oneof case once instead of re-querying it in every branch.
    kind = payload.WhichOneof('value')
    # Primitives
    if kind == 'str_value':
        return payload.str_value
    if kind == 'bool_value':
        return payload.bool_value
    if kind == 'int_value':
        return payload.int_value
    if kind == 'float_value':
        return payload.float_value
    if kind == 'none_value':
        return None
    # Iterables for nested types
    if kind == 'list_value':
        return [_deserialize(item) for item in payload.list_value.payloads]
    if kind == 'tuple_value':
        return tuple(_deserialize(item) for item in payload.tuple_value.payloads)
    if kind == 'dict_value':
        return {key: _deserialize(val) for key, val in payload.dict_value.payload_map.items()}
    # Allowlisted special types
    if kind == 'pandas_dataframe_value':
        return pd.read_parquet(io.BytesIO(payload.pandas_dataframe_value))
    if kind == 'polars_dataframe_value':
        # Polars DataFrames travel as an Arrow IPC stream (see the matching _serialize branch).
        with pyarrow.ipc.open_stream(payload.polars_dataframe_value) as reader:
            table = reader.read_all()
        return pl.from_arrow(table)
    if kind == 'pandas_series_value':
        # Pandas reads even a single-column parquet back as a DataFrame; unwrap the lone column.
        frame = pd.read_parquet(io.BytesIO(payload.pandas_series_value))
        return pd.Series(frame[frame.columns[0]])
    if kind == 'polars_series_value':
        return pl.Series(pl.read_parquet(io.BytesIO(payload.polars_series_value)))
    if kind == 'numpy_array_value':
        return np.load(io.BytesIO(payload.numpy_array_value), allow_pickle=False)
    if kind == 'numpy_scalar_value':
        data = np.load(io.BytesIO(payload.numpy_scalar_value), allow_pickle=False)
        # As of Numpy 2.0.2, np.load for a numpy scalar yields a dimensionless array instead of a scalar
        data = data.dtype.type(data)  # Restore the expected numpy scalar type.
        assert data.shape == ()  # Additional validation that the np.generic type remains solely for scalars
        assert isinstance(data, np.number) or isinstance(data, np.bool_)  # No support for bytes, strings, objects, etc
        return data
    if kind == 'bytes_io_value':
        return io.BytesIO(payload.bytes_io_value)

    raise TypeError(f'Found unknown Payload case {payload.WhichOneof("value")}')
|
211 |
+
|
212 |
+
### Client code
|
213 |
+
|
214 |
+
class Client():
    '''
    Class which allows callers to make KaggleEvaluation requests.
    '''
    def __init__(self, channel_address: str='localhost'):
        # Address only (no port); the port is appended from the module-level _GRPC_PORT.
        self.channel_address = channel_address
        # Insecure channel: gateway and inference server communicate on a private network.
        self.channel = grpc.insecure_channel(f'{channel_address}:{_GRPC_PORT}', options=_GRPC_CHANNEL_OPTIONS)
        # Tracks whether any RPC has ever succeeded; the first call gets a generous
        # startup window, subsequent calls get the (stricter) endpoint deadline.
        self._made_first_connection = False
        self.endpoint_deadline_seconds = DEFAULT_DEADLINE_SECONDS
        self.stub = kaggle_evaluation_grpc.KaggleEvaluationServiceStub(self.channel)

    def _send_with_deadline(self, request):
        ''' Sends a message to the server while also:
        - Throwing an error as soon as the inference_server container has been shut down.
        - Setting a deadline of STARTUP_LIMIT_SECONDS for the inference_server to startup.
        '''
        if self._made_first_connection:
            # Steady state: enforce the per-request deadline and fail fast if the server is gone.
            return self.stub.Send(request, wait_for_ready=False, timeout=self.endpoint_deadline_seconds)

        first_call_time = time.time()
        # Allow time for the server to start as long as its container is running
        while time.time() - first_call_time < STARTUP_LIMIT_SECONDS:
            try:
                response = self.stub.Send(request, wait_for_ready=False)
                self._made_first_connection = True
                break
            # NOTE(review): relies on grpc's private _InactiveRpcError; a public-API
            # alternative would be catching grpc.RpcError and checking err.code().
            except grpc._channel._InactiveRpcError as err:
                if 'StatusCode.UNAVAILABLE' not in str(err):
                    raise err
                # Confirm the inference_server container is still alive & it's worth waiting on the server.
                # If the inference_server container is no longer running this will throw a socket.gaierror.
                socket.gethostbyname(self.channel_address)
                time.sleep(_RETRY_SLEEP_SECONDS)

        if not self._made_first_connection:
            raise RuntimeError(f'Failed to connect to server after waiting {STARTUP_LIMIT_SECONDS} seconds')
        return response

    def send(self, name: str, *args, **kwargs):
        '''Sends a single KaggleEvaluation request.

        Args:
            name: The endpoint name for the request.
            *args: Variable-length/type arguments to be supplied on the request.
            **kwargs: Key-value arguments to be supplied on the request.

        Returns:
            The response, which is of one of several allow-listed data types.
        '''
        request = kaggle_evaluation_proto.KaggleEvaluationRequest(
            name=name,
            # protobuf repeated fields accept any iterable, so the lazy map is fine here.
            args=map(_serialize, args),
            kwargs={key: _serialize(value) for key, value in kwargs.items()}
        )
        response = self._send_with_deadline(request)

        return _deserialize(response.payload)

    def close(self):
        # Closes the underlying gRPC channel; the client cannot be reused afterwards.
        self.channel.close()
|
274 |
+
|
275 |
+
|
276 |
+
### Server code
|
277 |
+
|
278 |
+
class KaggleEvaluationServiceServicer(kaggle_evaluation_grpc.KaggleEvaluationServiceServicer):
    '''
    Class which allows serving responses to KaggleEvaluation requests. The inference_server will run this service to listen for and respond
    to requests from the Gateway. The Gateway may also listen for requests from the inference_server in some cases.
    '''
    def __init__(self, listeners: List[callable]):
        # Endpoint name -> handler function, keyed by the function's own name.
        self.listeners_map = {func.__name__: func for func in listeners}

    # pylint: disable=unused-argument
    def Send(self, request: kaggle_evaluation_proto.KaggleEvaluationRequest, context: grpc.ServicerContext) -> kaggle_evaluation_proto.KaggleEvaluationResponse:
        '''Handler for gRPC requests that deserializes arguments, calls a user-registered function for handling the
        requested endpoint, then serializes and returns the response.

        Args:
            request: The KaggleEvaluationRequest protobuf message.
            context: (Unused) gRPC context.

        Returns:
            The KaggleEvaluationResponse protobuf message.

        Raises:
            NotImplementedError if the caller has not registered a handler for the requested endpoint.
        '''
        handler = self.listeners_map.get(request.name)
        if handler is None:
            raise NotImplementedError(f'No listener for {request.name} was registered.')

        args = [_deserialize(arg) for arg in request.args]
        kwargs = {key: _deserialize(val) for key, val in request.kwargs.items()}
        result = handler(*args, **kwargs)
        return kaggle_evaluation_proto.KaggleEvaluationResponse(payload=_serialize(result))
|
309 |
+
|
310 |
+
def define_server(*endpoint_listeners: Tuple[Callable]) -> grpc.server:
    '''Registers the endpoints that the container is able to respond to, then starts a server which listens for
    those endpoints. The endpoints that need to be implemented will depend on the specific competition.

    Args:
        endpoint_listeners: Tuple of functions that define how requests to the endpoint of the function name should be
            handled.

    Returns:
        The gRPC server object, which has been started. It should be stopped at exit time.

    Raises:
        ValueError if parameter values are invalid.
    '''
    if not endpoint_listeners:
        raise ValueError('Must pass at least one endpoint listener, e.g. `predict`')
    for listener in endpoint_listeners:
        # Handlers are registered under their function name, so they must be real, named functions.
        if not isinstance(listener, Callable):
            raise ValueError('Endpoint listeners passed to `serve` must be functions')
        if listener.__name__ == '<lambda>':
            raise ValueError('Functions passed as endpoint listeners must be named')

    # Single worker thread: requests are expected to be handled strictly one at a time.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1), options=_GRPC_CHANNEL_OPTIONS)
    servicer = KaggleEvaluationServiceServicer(endpoint_listeners)
    kaggle_evaluation_grpc.add_KaggleEvaluationServiceServicer_to_server(servicer, server)
    server.add_insecure_port(f'[::]:{_GRPC_PORT}')
    return server
|
kaggle_evaluation/core/templates.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''Template for the two classes hosts should customize for each competition.'''
|
2 |
+
|
3 |
+
import abc
|
4 |
+
import os
|
5 |
+
import time
|
6 |
+
import sys
|
7 |
+
import traceback
|
8 |
+
import warnings
|
9 |
+
|
10 |
+
from typing import Callable, Generator, Tuple
|
11 |
+
|
12 |
+
import kaggle_evaluation.core.base_gateway
|
13 |
+
import kaggle_evaluation.core.relay
|
14 |
+
|
15 |
+
_initial_import_time = time.time()
|
16 |
+
_issued_startup_time_warning = False
|
17 |
+
|
18 |
+
|
19 |
+
class Gateway(kaggle_evaluation.core.base_gateway.BaseGateway, abc.ABC):
    '''
    Template to start with when writing a new gateway.
    In most cases, hosts should only need to write get_all_predictions.
    There are two main methods for sending data to the inference_server hosts should understand:
    - Small datasets: use `self.predict`. Competitors will receive the data passed to self.predict as
    Python objects in memory. This is just a wrapper for self.client.send(); you can write additional
    wrappers if necessary.
    - Large datasets: it's much faster to send data via self.share_files, which is equivalent to making
    files available via symlink. See base_gateway.BaseGateway.share_files for the full details.
    '''
    @abc.abstractmethod
    def unpack_data_paths(self):
        ''' Map the contents of self.data_paths to the competition-specific entries
        Each competition should respect these paths to make it easy for competitors to
        run tests on their local machines or with custom files.

        Should include default paths to support data_paths = None.
        '''
        raise NotImplementedError

    @abc.abstractmethod
    def generate_data_batches(self) -> Generator:
        ''' Used by the default implementation of `get_all_predictions` so we can
        ensure `validate_prediction_batch` is run every time `predict` is called.

        This method must yield both the batch of data to be sent to `predict` and the validation
        data sent to `validate_prediction_batch`.
        '''
        raise NotImplementedError

    def get_all_predictions(self):
        # Runs the full predict -> validate loop over every batch and collects the results.
        all_predictions = []
        for data_batch, validation_batch in self.generate_data_batches():
            predictions = self.predict(*data_batch)
            self.validate_prediction_batch(predictions, validation_batch)
            all_predictions.append(predictions)
        return all_predictions

    def predict(self, *args, **kwargs):
        ''' self.predict will send all data in args and kwargs to the user container, and
        instruct the user container to generate a `predict` response.

        '''
        try:
            return self.client.send('predict', *args, **kwargs)
        except Exception as e:
            # NOTE(review): handle_server_error comes from BaseGateway (not visible here);
            # presumably it raises, otherwise this method implicitly returns None — confirm.
            self.handle_server_error(e, 'predict')

    def set_response_timeout_seconds(self, timeout_seconds: float=6_000):
        # Set a response deadline that will apply after the very first response
        self.client.endpoint_deadline_seconds = timeout_seconds

    def run(self):
        # Top-level driver: run the evaluation, convert any failure into a
        # GatewayRuntimeError, and always release the client/server resources.
        error = None
        try:
            self.unpack_data_paths()
            predictions = self.get_all_predictions()
            self.write_submission(predictions)
        except kaggle_evaluation.core.base_gateway.GatewayRuntimeError as gre:
            # Already classified by lower-level code; pass through unchanged.
            error = gre
        except Exception:
            # Get the full stack trace
            exc_type, exc_value, exc_traceback = sys.exc_info()
            error_str = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))

            error = kaggle_evaluation.core.base_gateway.GatewayRuntimeError(
                kaggle_evaluation.core.base_gateway.GatewayRuntimeErrorType.GATEWAY_RAISED_EXCEPTION,
                error_str
            )

        self.client.close()
        if self.server:
            self.server.stop(0)

        if kaggle_evaluation.core.base_gateway.IS_RERUN:
            # During the competition rerun, report the outcome instead of raising.
            self.write_result(error)
        elif error:
            # For local testing
            raise error
|
99 |
+
|
100 |
+
|
101 |
+
class InferenceServer(abc.ABC):
    '''
    Base class for competition participants to inherit from when writing their submission. In most cases, users should
    only need to implement a `predict` function or other endpoints to pass to this class's constructor, and hosts will
    provide a mock Gateway for testing.
    '''
    def __init__(self, endpoint_listeners: Tuple[Callable]):
        self.server = kaggle_evaluation.core.relay.define_server(endpoint_listeners)
        self.client = None  # The inference_server can have a client but it isn't typically necessary.
        # NOTE(review): this instance attribute appears unused — run_local_gateway uses the
        # module-level _issued_startup_time_warning instead. Confirm which one is intended.
        self._issued_startup_time_warning = False
        self._startup_limit_seconds = kaggle_evaluation.core.relay.STARTUP_LIMIT_SECONDS

    def serve(self):
        # Start listening for gateway requests; blocks forever during competition reruns.
        self.server.start()
        if os.getenv('KAGGLE_IS_COMPETITION_RERUN') is not None:
            self.server.wait_for_termination()  # This will block all other code

    @abc.abstractmethod
    def _get_gateway_for_test(self, data_paths):
        # TODO: This should return a version of the competition-specific gateway that's able to load
        # data used for unit tests.
        raise NotImplementedError

    def run_local_gateway(self, data_paths=None):
        # Local-testing path: run the server and a competition gateway in the same process.
        global _issued_startup_time_warning
        script_elapsed_seconds = time.time() - _initial_import_time
        # Warn (once per process) if the user burned too much time before starting the server.
        if script_elapsed_seconds > self._startup_limit_seconds and not _issued_startup_time_warning:
            warnings.warn(
                f'''{int(script_elapsed_seconds)} seconds elapsed before server startup.
                This exceeds the startup time limit of {int(self._startup_limit_seconds)} seconds that the gateway will enforce
                during the rerun on the hidden test set. Start the server before performing any time consuming steps.''',
                category=RuntimeWarning
            )
            _issued_startup_time_warning = True

        self.server.start()
        try:
            self.gateway = self._get_gateway_for_test(data_paths)
            self.gateway.run()
        except Exception as err:
            # `from None` drops the chained exception context for a cleaner user-facing traceback.
            raise err from None
        finally:
            self.server.stop(0)
|
kaggle_evaluation/jane_street_gateway.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Gateway notebook for https://www.kaggle.com/competitions/jane-street-real-time-market-data-forecasting"""
|
2 |
+
|
3 |
+
import os
|
4 |
+
|
5 |
+
import kaggle_evaluation.core.base_gateway
|
6 |
+
import kaggle_evaluation.core.templates
|
7 |
+
import polars as pl
|
8 |
+
|
9 |
+
|
10 |
+
class JSGateway(kaggle_evaluation.core.templates.Gateway):
    '''Gateway for the Jane Street real-time market data forecasting competition.'''

    def __init__(self, data_paths: tuple[str, str] | None = None):
        super().__init__(data_paths, file_share_dir=None)
        self.data_paths = data_paths
        self.set_response_timeout_seconds(60)

    def unpack_data_paths(self):
        # Resolve (test_path, lags_path), defaulting to the competition input directory.
        if self.data_paths:
            self.test_path, self.lags_path = self.data_paths
        else:
            self.test_path = (
                "/kaggle/input/jane-street-realtime-marketdata-forecasting/test.parquet"
            )
            self.lags_path = (
                "/kaggle/input/jane-street-realtime-marketdata-forecasting/lags.parquet"
            )

    def generate_data_batches(self):
        # Enumerate the date_id partitions of the test set, in order.
        unique_dates = (
            pl.scan_parquet(self.test_path)
            .select(pl.col("date_id").unique())
            .collect()
            .get_column("date_id")
        )
        date_ids = sorted(unique_dates)
        assert date_ids[0] == 0

        for date_id in date_ids:
            # One hive partition per day for both the test rows and the lagged targets.
            day_frame = pl.read_parquet(
                os.path.join(self.test_path, f"date_id={date_id}"),
            )
            lags = pl.read_parquet(
                os.path.join(self.lags_path, f"date_id={date_id}"),
            )

            grouped = day_frame.group_by("time_id", maintain_order=True)
            for (time_id,), batch in grouped:
                # Lags are only delivered with the first batch of each day.
                test_data = (batch, lags if time_id == 0 else None)
                validation_data = batch.select('row_id')
                yield test_data, validation_data
|
49 |
+
|
50 |
+
|
51 |
+
if __name__ == "__main__":
    # Only execute the full gateway during the official competition rerun;
    # local invocations of this script are a no-op.
    if os.getenv("KAGGLE_IS_COMPETITION_RERUN"):
        gateway = JSGateway()
        # Relies on valid default data paths
        gateway.run()
    else:
        print("Skipping run for now")
|
kaggle_evaluation/jane_street_inference_server.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import kaggle_evaluation.core.templates
|
3 |
+
|
4 |
+
import jane_street_gateway
|
5 |
+
|
6 |
+
|
7 |
+
class JSInferenceServer(kaggle_evaluation.core.templates.InferenceServer):
    '''Jane Street inference server; wires the competition gateway into the template.'''

    def _get_gateway_for_test(self, data_paths=None):
        # Returns the competition-specific gateway so run_local_gateway can drive local tests.
        return jane_street_gateway.JSGateway(data_paths)
|
lags.parquet/.DS_Store
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:189f13537da47c0531de399d77a57f11df733d7fbef684d5b38f62918b03470e
|
3 |
+
size 6148
|
lags.parquet/date_id=0/part-0.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f9492d4475c5d06725019fd037d0e61ab9cede5a4d49293bd874a95f3c7813bb
|
3 |
+
size 5752
|
test.parquet/.DS_Store
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d663248193e5b378048c20c00c5e1d480a9cf788a05fed70a2539e98b1d3a156
|
3 |
+
size 6148
|
test.parquet/date_id=0/part-0.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5b73c5a30da6d6cd386c9d28cd7cb43b40a877ffdb7ab4420d7b0b8eac19c9b6
|
3 |
+
size 28165
|