Spaces:
Running
on
Zero
Running
on
Zero
File size: 2,105 Bytes
7931de6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pytriton.model_config.common import DeviceKind, DynamicBatcher, QueuePolicy, TimeoutAction
from pytriton.model_config.triton_model_config import ResponseCache, TensorSpec, TritonModelConfig
# Policy applied to requests at the default priority level: they may override
# the timeout, and a timed-out request is delayed rather than rejected.
_default_queue_policy = QueuePolicy(
    allow_timeout_override=True,
    timeout_action=TimeoutAction.DELAY,
    default_timeout_microseconds=100,
    max_queue_size=2,
)

# Per-priority overrides: requests at priority level 2 get a slightly larger
# queue than the default policy allows.
_priority_queue_policies = {
    2: QueuePolicy(
        allow_timeout_override=True,
        timeout_action=TimeoutAction.DELAY,
        default_timeout_microseconds=100,
        max_queue_size=3,
    )
}

# Dynamic batcher: coalesce requests into preferred batch sizes, waiting at
# most 100 us, while preserving response ordering across the queue.
_dynamic_batcher = DynamicBatcher(
    preferred_batch_size=[16, 32],
    max_queue_delay_microseconds=100,
    preserve_ordering=True,
    priority_levels=3,
    default_priority_level=1,
    default_queue_policy=_default_queue_policy,
    priority_queue_policy=_priority_queue_policies,
)

# A fully-populated Triton model configuration exercising batching, queue
# policies, mixed CPU/GPU instance groups, decoupled mode, backend
# parameters, typed I/O tensor specs, and the response cache.
full_model_config = TritonModelConfig(
    model_name="simple",
    batching=True,
    max_batch_size=16,
    batcher=_dynamic_batcher,
    instance_group={DeviceKind.KIND_CPU: 1, DeviceKind.KIND_GPU: 2},
    decoupled=True,
    backend_parameters={
        "parameter1": "value1",
        "parameter2": "value2",
    },
    inputs=[
        TensorSpec(name="INPUT_1", dtype=np.float32, shape=(-1,)),
        TensorSpec(name="INPUT_2", dtype=np.bytes_, shape=(-1,)),
    ],
    outputs=[
        TensorSpec(name="OUTPUT_1", dtype=np.int32, shape=(1000,)),
    ],
    response_cache=ResponseCache(enable=True),
)
|