/* COPYRIGHT HEADER GOES HERE: No CopyRight Header String Passed During Model Conversion */ /* Command Line used: qnn-onnx-converter act_bw=16 act_quantizer=tf adjust_nms_features_dims=True algorithms=[] align_matmul_ranks=True arch_checker=False batch=None bias_bw=32 converter_op_package_lib= copyright_file=None custom_io= custom_op_config_paths=None debug=-1 define_symbol=None disable_batchnorm_folding=False disable_node_validation=False disable_qnn_op_config_validation=False disable_relu_squashing=False dry_run=None dumpIR=False dump_custom_io_config_template= dump_inferred_model=False dump_value_info=False enable_match_gathernd=False exclude_named_tensors=False expand_gru_op_structure=True expand_lstm_op_structure=False extract_color_transform=True float_bias_bw=0 float_bw=32 float_fallback=False force_prune_cast_ops=False handle_gather_negative_indices=True ignore_encodings=False inject_cast_for_gather=True input_dim=[['images', '1,3,640,640']] input_dtype=[] input_encoding=[] input_layout=[] input_list=/home/dlc_quan_temp/GD84REtr6dTF7IP/quant.txt input_type=[] keep_disconnected_nodes=False keep_int64_inputs=False keep_quant_nodes=False match_caffe_ssd_to_tf=True no_simplification=False op_package_lib= out_names=['/model.24/m.0/Conv_output_0', '/model.24/m.1/Conv_output_0', '/model.24/m.2/Conv_output_0'] overwrite_model_prefix=False package_name=None param_quantizer=tf perform_axes_to_spatial_first_order=True prepare_inputs_as_params=False preprocess_roi_pool_inputs=True preserve_io=[] quantization_overrides= restrict_quantization_steps=[] squash_box_decoder=True unroll_gru_time_steps=True unroll_lstm_time_steps=True use_convert_quantization_nodes=False use_dynamic_16_bit_weights=False use_native_dtype=False use_native_input_files=False use_native_output_files=False use_per_channel_quantization=[False] use_per_row_quantization=False weight_bw=8 */ #include "QnnOpDef.h" #include "QnnModel.hpp" // Flag to determine if Backend should do node validation for each 
opNode added #define DO_GRAPH_NODE_VALIDATIONS 1 using namespace qnn_wrapper_api; extern "C" { QNN_API ModelError_t QnnModel_composeGraphs(Qnn_BackendHandle_t backendHandle, QNN_INTERFACE_VER_TYPE interface, Qnn_ContextHandle_t contextHandle, const GraphConfigInfo_t** graphsConfigInfo, const uint32_t numGraphsConfigInfo, GraphInfoPtr_t** graphsInfo, uint32_t* numGraphsInfo, bool debug, QnnLog_Callback_t logCallback, QnnLog_Level_t maxLogLevel) { ModelError_t err = MODEL_NO_ERROR; /* model/graph for cutoff_yolov5s*/ QnnModel cutoff_yolov5s; const QnnGraph_Config_t** graphConfigs = nullptr; VALIDATE(getQnnGraphConfigFromInfo("cutoff_yolov5s", graphsConfigInfo, numGraphsConfigInfo, graphConfigs), err); VALIDATE(cutoff_yolov5s.initialize(backendHandle, interface, contextHandle, "cutoff_yolov5s", debug, DO_GRAPH_NODE_VALIDATIONS, graphConfigs), err); uint32_t dimensions_images[] = {1, 640, 640, 3}; VALIDATE(cutoff_yolov5s.addTensor("images", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "images", .type= QNN_TENSOR_TYPE_APP_WRITE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152590218931f, .offset= 0}}}, .rank= 4, .dimensions=dimensions_images, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} ), err); uint32_t dimensions_model_0_conv_weight[] = {6, 6, 3, 32}; VALIDATE(cutoff_yolov5s.addTensor("model_0_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_0_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.1020871326327324f, .offset= -115}}}, .rank= 4, 
.dimensions=dimensions_model_0_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_0_conv_weight), .dataSize=BINLEN(model_0_conv_weight)}}}}} ), err); uint32_t dimensions_model_0_conv_bias[] = {32}; VALIDATE(cutoff_yolov5s.addTensor("model_0_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_0_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000050697846f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_0_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_0_conv_bias), .dataSize=BINLEN(model_0_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_0_conv_Conv */ uint32_t dimensions___model_0_conv_Conv_dilation[] = {2}; uint32_t __model_0_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_0_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_0_conv_Conv_pad_amount[] = {2, 2, 2, 2}; uint32_t dimensions___model_0_conv_Conv_stride[] = {2}; uint32_t __model_0_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_0_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_0_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_0_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_0_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, 
{.v1= { .id=0, .name= "__model_0_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_0_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_0_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_0_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_0_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_0_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_0_conv_Conv[] = { "images", "model_0_conv_weight", "model_0_conv_bias" }; uint32_t dimensions__model_0_conv_Conv_output_0[] = {1, 320, 320, 32}; Qnn_Tensor_t outputs__model_0_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_0_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0014899881789461f, .offset= -31931}}}, .rank= 4, .dimensions=dimensions__model_0_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_0_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_0_conv_Conv, // Node Params 4, // Num Node Params inputs__model_0_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_0_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_0_act_Sigmoid */ const char* inputs__model_0_act_Sigmoid[] = { "_model_0_conv_Conv_output_0" }; uint32_t dimensions__model_0_act_Sigmoid_output_0[] = {1, 320, 320, 32}; Qnn_Tensor_t outputs__model_0_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_0_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_0_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_0_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_0_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_0_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_0_act_Mul */ const char* inputs__model_0_act_Mul[] = { "_model_0_conv_Conv_output_0", "_model_0_act_Sigmoid_output_0" }; uint32_t dimensions__model_0_act_Mul_output_0[] = {1, 320, 320, 32}; Qnn_Tensor_t outputs__model_0_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_0_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007682504947297f, .offset= -362}}}, .rank= 4, .dimensions=dimensions__model_0_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_0_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_0_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_0_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_1_conv_weight[] = {3, 3, 32, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0069086146540940f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_model_1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_1_conv_weight), .dataSize=BINLEN(model_1_conv_weight)}}}}} ), err); uint32_t dimensions_model_1_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000019767228f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(model_1_conv_bias), .dataSize=BINLEN(model_1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_1_conv_Conv */ uint32_t dimensions___model_1_conv_Conv_dilation[] = {2}; uint32_t __model_1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_1_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_1_conv_Conv_stride[] = {2}; uint32_t __model_1_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_1_conv_Conv[] = { "_model_0_act_Mul_output_0", "model_1_conv_weight", "model_1_conv_bias" }; uint32_t dimensions__model_1_conv_Conv_output_0[] = {1, 160, 160, 64}; Qnn_Tensor_t outputs__model_1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0024792121257633f, .offset= -32644}}}, .rank= 4, .dimensions=dimensions__model_1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_1_act_Sigmoid */ const char* inputs__model_1_act_Sigmoid[] = { "_model_1_conv_Conv_output_0" }; uint32_t dimensions__model_1_act_Sigmoid_output_0[] = {1, 160, 160, 64}; Qnn_Tensor_t outputs__model_1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_1_act_Mul */ const char* inputs__model_1_act_Mul[] = { "_model_1_conv_Conv_output_0", "_model_1_act_Sigmoid_output_0" }; uint32_t dimensions__model_1_act_Mul_output_0[] = {1, 160, 160, 64}; Qnn_Tensor_t outputs__model_1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0012485315091908f, .offset= -223}}}, .rank= 4, .dimensions=dimensions__model_1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_2_cv1_conv_weight[] = {1, 1, 64, 32}; 
VALIDATE(cutoff_yolov5s.addTensor("model_2_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0036230115219951f, .offset= -183}}}, .rank= 4, .dimensions=dimensions_model_2_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_cv1_conv_weight), .dataSize=BINLEN(model_2_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_2_cv1_conv_bias[] = {32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000010004297f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_2_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_cv1_conv_bias), .dataSize=BINLEN(model_2_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_2_cv1_conv_Conv */ uint32_t dimensions___model_2_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_2_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_2_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_2_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_2_cv1_conv_Conv_stride[] = {2}; uint32_t __model_2_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_2_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_2_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_2_cv1_conv_Conv[] = { "_model_1_act_Mul_output_0", "model_2_cv1_conv_weight", "model_2_cv1_conv_bias" }; uint32_t dimensions__model_2_cv1_conv_Conv_output_0[] = {1, 160, 160, 32}; 
Qnn_Tensor_t outputs__model_2_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0010293343802914f, .offset= -43939}}}, .rank= 4, .dimensions=dimensions__model_2_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_2_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_2_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_2_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_cv1_act_Sigmoid */ const char* inputs__model_2_cv1_act_Sigmoid[] = { "_model_2_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_2_cv1_act_Sigmoid_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_2_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node 
Params inputs__model_2_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_2_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_cv1_act_Mul */ const char* inputs__model_2_cv1_act_Mul[] = { "_model_2_cv1_conv_Conv_output_0", "_model_2_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_2_cv1_act_Mul_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003434480167925f, .offset= -811}}}, .rank= 4, .dimensions=dimensions__model_2_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_2_m_0_cv1_conv_weight[] = {1, 1, 32, 32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0199895892292261f, .offset= -174}}}, .rank= 4, .dimensions=dimensions_model_2_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=BINVARSTART(model_2_m_0_cv1_conv_weight), .dataSize=BINLEN(model_2_m_0_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_2_m_0_cv1_conv_bias[] = {32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000024373878f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_2_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_m_0_cv1_conv_bias), .dataSize=BINLEN(model_2_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_2_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_2_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_2_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_2_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_2_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_2_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_2_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_2_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_2_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_2_m_m_0_cv1_conv_Conv[] = { "_model_2_cv1_act_Mul_output_0", "model_2_m_0_cv1_conv_weight", "model_2_m_0_cv1_conv_bias" }; uint32_t dimensions__model_2_m_m_0_cv1_conv_Conv_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009298155200668f, .offset= -49290}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_2_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_2_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_2_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_2_m_m_0_cv1_act_Sigmoid[] = { "_model_2_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_2_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_2_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_m_m_0_cv1_act_Mul */ const 
char* inputs__model_2_m_m_0_cv1_act_Mul[] = { "_model_2_m_m_0_cv1_conv_Conv_output_0", "_model_2_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_2_m_m_0_cv1_act_Mul_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002347366389586f, .offset= -1186}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_2_m_0_cv2_conv_weight[] = {3, 3, 32, 32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0184967666864395f, .offset= -119}}}, .rank= 4, .dimensions=dimensions_model_2_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_m_0_cv2_conv_weight), .dataSize=BINLEN(model_2_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_2_m_0_cv2_conv_bias[] 
= {32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000030096319f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_2_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_m_0_cv2_conv_bias), .dataSize=BINLEN(model_2_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_2_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_2_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_2_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_2_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_2_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_2_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_2_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_2_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_m_m_0_cv2_conv_Conv_pad_amount", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_2_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_2_m_m_0_cv2_conv_Conv[] = { "_model_2_m_m_0_cv1_act_Mul_output_0", "model_2_m_0_cv2_conv_weight", "model_2_m_0_cv2_conv_bias" }; uint32_t dimensions__model_2_m_m_0_cv2_conv_Conv_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0008308318792842f, .offset= -32730}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_cv2_conv_Conv_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_2_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_2_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_2_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_2_m_m_0_cv2_act_Sigmoid[] = { "_model_2_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_2_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_2_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_m_m_0_cv2_act_Mul */ const char* inputs__model_2_m_m_0_cv2_act_Mul[] = { "_model_2_m_m_0_cv2_conv_Conv_output_0", "_model_2_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_2_m_m_0_cv2_act_Mul_output_0[] = {1, 
160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004201405681670f, .offset= -663}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_m_m_0_Add */ const char* inputs__model_2_m_m_0_Add[] = { "_model_2_cv1_act_Mul_output_0", "_model_2_m_m_0_cv2_act_Mul_output_0" }; uint32_t dimensions__model_2_m_m_0_Add_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_m_m_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_m_m_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004221911367495f, .offset= -1319}}}, .rank= 4, .dimensions=dimensions__model_2_m_m_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_m_m_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, 
// Node Params 0, // Num Node Params inputs__model_2_m_m_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_m_m_0_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_2_cv2_conv_weight[] = {1, 1, 64, 32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0083658732473850f, .offset= -179}}}, .rank= 4, .dimensions=dimensions_model_2_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_cv2_conv_weight), .dataSize=BINLEN(model_2_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_2_cv2_conv_bias[] = {32}; VALIDATE(cutoff_yolov5s.addTensor("model_2_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014632799f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_2_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_cv2_conv_bias), .dataSize=BINLEN(model_2_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_2_cv2_conv_Conv */ uint32_t dimensions___model_2_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_2_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_2_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_2_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_2_cv2_conv_Conv_stride[] = {2}; uint32_t __model_2_cv2_conv_Conv_stride[] = {1, 
1}; Qnn_Param_t params__model_2_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_2_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) 
{QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_2_cv2_conv_Conv[] = { "_model_1_act_Mul_output_0", "model_2_cv2_conv_weight", "model_2_cv2_conv_bias" }; uint32_t dimensions__model_2_cv2_conv_Conv_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0023219869472086f, .offset= -47381}}}, .rank= 4, .dimensions=dimensions__model_2_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_2_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_2_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_2_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_cv2_act_Sigmoid */ const char* inputs__model_2_cv2_act_Sigmoid[] = { "_model_2_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_2_cv2_act_Sigmoid_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_2_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_2_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_cv2_act_Mul */ const char* inputs__model_2_cv2_act_Mul[] = { "_model_2_cv2_conv_Conv_output_0", "_model_2_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_2_cv2_act_Mul_output_0[] = {1, 160, 160, 32}; Qnn_Tensor_t outputs__model_2_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006474598776549f, .offset= -430}}}, .rank= 4, .dimensions=dimensions__model_2_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_Concat */ Qnn_Param_t params__model_2_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_2_Concat[] = { "_model_2_m_m_0_Add_output_0", "_model_2_cv2_act_Mul_output_0" }; uint32_t dimensions__model_2_Concat_output_0[] = {1, 160, 160, 
64}; Qnn_Tensor_t outputs__model_2_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006517089204863f, .offset= -854}}}, .rank= 4, .dimensions=dimensions__model_2_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_2_Concat, // Node Params 1, // Num Node Params inputs__model_2_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_2_cv3_conv_weight[] = {1, 1, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_2_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0064793643541634f, .offset= -147}}}, .rank= 4, .dimensions=dimensions_model_2_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_cv3_conv_weight), .dataSize=BINLEN(model_2_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_2_cv3_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_2_cv3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_2_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000029979064f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_2_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_2_cv3_conv_bias), .dataSize=BINLEN(model_2_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_2_cv3_conv_Conv */ uint32_t dimensions___model_2_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_2_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_2_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_2_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_2_cv3_conv_Conv_stride[] = {2}; uint32_t __model_2_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_2_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_2_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_2_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_2_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_2_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_2_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_2_cv3_conv_Conv[] = { "_model_2_Concat_output_0", "model_2_cv3_conv_weight", "model_2_cv3_conv_bias" }; uint32_t dimensions__model_2_cv3_conv_Conv_output_0[] = {1, 160, 160, 64}; Qnn_Tensor_t outputs__model_2_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0006736086797900f, .offset= -40113}}}, .rank= 4, .dimensions=dimensions__model_2_cv3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_2_cv3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_2_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_2_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), 
err); /* ADDING NODE FOR _model_2_cv3_act_Sigmoid */ const char* inputs__model_2_cv3_act_Sigmoid[] = { "_model_2_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_2_cv3_act_Sigmoid_output_0[] = {1, 160, 160, 64}; Qnn_Tensor_t outputs__model_2_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_2_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_2_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_2_cv3_act_Mul */ const char* inputs__model_2_cv3_act_Mul[] = { "_model_2_cv3_conv_Conv_output_0", "_model_2_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_2_cv3_act_Mul_output_0[] = {1, 160, 160, 64}; Qnn_Tensor_t outputs__model_2_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_2_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002655478601810f, .offset= -1049}}}, .rank= 4, .dimensions=dimensions__model_2_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} 
}; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_2_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_2_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_2_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_3_conv_weight[] = {3, 3, 64, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0042196996510029f, .offset= -117}}}, .rank= 4, .dimensions=dimensions_model_3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_3_conv_weight), .dataSize=BINLEN(model_3_conv_weight)}}}}} ), err); uint32_t dimensions_model_3_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014794969f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_3_conv_bias), .dataSize=BINLEN(model_3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_3_conv_Conv */ uint32_t dimensions___model_3_conv_Conv_dilation[] = {2}; uint32_t __model_3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_3_conv_Conv_pad_amount[] = {2, 2}; uint32_t 
__model_3_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_3_conv_Conv_stride[] = {2}; uint32_t __model_3_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_3_conv_Conv[] = { "_model_2_cv3_act_Mul_output_0", "model_3_conv_weight", "model_3_conv_bias" }; uint32_t dimensions__model_3_conv_Conv_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003590791020542f, .offset= -40403}}}, .rank= 4, .dimensions=dimensions__model_3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_3_act_Sigmoid */ const char* inputs__model_3_act_Sigmoid[] = { "_model_3_conv_Conv_output_0" }; uint32_t dimensions__model_3_act_Sigmoid_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, 
.dimensions=dimensions__model_3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_3_act_Mul */ const char* inputs__model_3_act_Mul[] = { "_model_3_conv_Conv_output_0", "_model_3_act_Sigmoid_output_0" }; uint32_t dimensions__model_3_act_Mul_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001419365144102f, .offset= -1962}}}, .rank= 4, .dimensions=dimensions__model_3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_4_cv1_conv_weight[] = {1, 1, 128, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043396777473390f, .offset= -173}}}, .rank= 4, .dimensions=dimensions_model_4_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_cv1_conv_weight), .dataSize=BINLEN(model_4_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_cv1_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000005588278f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_4_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_cv1_conv_bias), .dataSize=BINLEN(model_4_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_cv1_conv_Conv */ uint32_t dimensions___model_4_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_4_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_4_cv1_conv_Conv_stride[] = {2}; uint32_t __model_4_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions___model_4_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_4_cv1_conv_Conv[] = { "_model_3_act_Mul_output_0", "model_4_cv1_conv_weight", "model_4_cv1_conv_bias" }; uint32_t dimensions__model_4_cv1_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001509718276793f, .offset= -46283}}}, .rank= 4, .dimensions=dimensions__model_4_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_4_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_cv1_act_Sigmoid */ const char* inputs__model_4_cv1_act_Sigmoid[] = { "_model_4_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_4_cv1_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_cv1_act_Mul */ const char* 
inputs__model_4_cv1_act_Mul[] = { "_model_4_cv1_conv_Conv_output_0", "_model_4_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_cv1_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000463015458081f, .offset= -6014}}}, .rank= 4, .dimensions=dimensions__model_4_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_4_m_0_cv1_conv_weight[] = {1, 1, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0262842532247305f, .offset= -111}}}, .rank= 4, .dimensions=dimensions_model_4_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_0_cv1_conv_weight), .dataSize=BINLEN(model_4_m_0_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_m_0_cv1_conv_bias[] = {64}; 
VALIDATE(cutoff_yolov5s.addTensor("model_4_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000017815247f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_4_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_0_cv1_conv_bias), .dataSize=BINLEN(model_4_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_4_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_4_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_4_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_4_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_0_cv1_conv_Conv_pad_amount", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_4_m_m_0_cv1_conv_Conv[] = { "_model_4_cv1_act_Mul_output_0", "model_4_m_0_cv1_conv_weight", "model_4_m_0_cv1_conv_bias" }; uint32_t dimensions__model_4_m_m_0_cv1_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003198964404874f, .offset= -32760}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_4_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_4_m_m_0_cv1_act_Sigmoid[] = { "_model_4_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_4_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_0_cv1_act_Mul */ const char* inputs__model_4_m_m_0_cv1_act_Mul[] = { "_model_4_m_m_0_cv1_conv_Conv_output_0", "_model_4_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_m_m_0_cv1_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t 
outputs__model_4_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001642310817260f, .offset= -1696}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_4_m_0_cv2_conv_weight[] = {3, 3, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0033535182010382f, .offset= -115}}}, .rank= 4, .dimensions=dimensions_model_4_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_0_cv2_conv_weight), .dataSize=BINLEN(model_4_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_m_0_cv2_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000011107433f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_4_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_0_cv2_conv_bias), .dataSize=BINLEN(model_4_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_4_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_4_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_4_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_4_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_0_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 
0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_4_m_m_0_cv2_conv_Conv[] = { "_model_4_m_m_0_cv1_act_Mul_output_0", "model_4_m_0_cv2_conv_weight", "model_4_m_0_cv2_conv_bias" }; uint32_t dimensions__model_4_m_m_0_cv2_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001907631522045f, .offset= -38231}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn 
Node Type params__model_4_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_4_m_m_0_cv2_act_Sigmoid[] = { "_model_4_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_4_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_0_cv2_act_Mul */ const char* inputs__model_4_m_m_0_cv2_act_Mul[] = { "_model_4_m_m_0_cv2_conv_Conv_output_0", "_model_4_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_m_m_0_cv2_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000832944861031f, .offset= -3343}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_0_Add */ const char* inputs__model_4_m_m_0_Add[] = { "_model_4_cv1_act_Mul_output_0", "_model_4_m_m_0_cv2_act_Mul_output_0" }; uint32_t dimensions__model_4_m_m_0_Add_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000848727722769f, .offset= -6562}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_m_m_0_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t 
dimensions_model_4_m_1_cv1_conv_weight[] = {1, 1, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_1_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_1_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0135572925209999f, .offset= -150}}}, .rank= 4, .dimensions=dimensions_model_4_m_1_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_1_cv1_conv_weight), .dataSize=BINLEN(model_4_m_1_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_m_1_cv1_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_1_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_1_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000012511689f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_4_m_1_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_1_cv1_conv_bias), .dataSize=BINLEN(model_4_m_1_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_m_m_1_cv1_conv_Conv */ uint32_t dimensions___model_4_m_m_1_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_4_m_m_1_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_m_m_1_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_m_m_1_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_4_m_m_1_cv1_conv_Conv_stride[] = {2}; uint32_t __model_4_m_m_1_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_m_m_1_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", 
{.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_1_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_1_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_1_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_1_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_m_m_1_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_1_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_1_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_1_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_1_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const 
char* inputs__model_4_m_m_1_cv1_conv_Conv[] = { "_model_4_m_m_0_Add_output_0", "model_4_m_1_cv1_conv_weight", "model_4_m_1_cv1_conv_bias" }; uint32_t dimensions__model_4_m_m_1_cv1_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002675629220903f, .offset= -36509}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_1_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_4_m_m_1_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_m_m_1_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_m_m_1_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_1_cv1_act_Sigmoid */ const char* inputs__model_4_m_m_1_cv1_act_Sigmoid[] = { "_model_4_m_m_1_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_4_m_m_1_cv1_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_cv1_act_Sigmoid_output_0, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_1_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_1_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_m_m_1_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_1_cv1_act_Mul */ const char* inputs__model_4_m_m_1_cv1_act_Mul[] = { "_model_4_m_m_1_cv1_conv_Conv_output_0", "_model_4_m_m_1_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_m_m_1_cv1_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001227062457474f, .offset= -2269}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_1_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_1_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_m_m_1_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_4_m_1_cv2_conv_weight[] = {3, 3, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_1_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"model_4_m_1_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0067659108899534f, .offset= -137}}}, .rank= 4, .dimensions=dimensions_model_4_m_1_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_1_cv2_conv_weight), .dataSize=BINLEN(model_4_m_1_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_m_1_cv2_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_m_1_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_m_1_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000010163683f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_4_m_1_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_m_1_cv2_conv_bias), .dataSize=BINLEN(model_4_m_1_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_m_m_1_cv2_conv_Conv */ uint32_t dimensions___model_4_m_m_1_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_4_m_m_1_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_m_m_1_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_m_m_1_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_4_m_m_1_cv2_conv_Conv_stride[] = {2}; uint32_t __model_4_m_m_1_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_m_m_1_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_1_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_1_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_1_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_1_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_m_m_1_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_1_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_m_m_1_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_m_m_1_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_m_m_1_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_4_m_m_1_cv2_conv_Conv[] = { "_model_4_m_m_1_cv1_act_Mul_output_0", "model_4_m_1_cv2_conv_weight", "model_4_m_1_cv2_conv_bias" }; uint32_t 
dimensions__model_4_m_m_1_cv2_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003871233202517f, .offset= -37531}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_1_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_4_m_m_1_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_m_m_1_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_m_m_1_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_1_cv2_act_Sigmoid */ const char* inputs__model_4_m_m_1_cv2_act_Sigmoid[] = { "_model_4_m_m_1_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_4_m_m_1_cv2_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // 
Op_Config_t Version "_model_4_m_m_1_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_1_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_m_m_1_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_1_cv2_act_Mul */ const char* inputs__model_4_m_m_1_cv2_act_Mul[] = { "_model_4_m_m_1_cv2_conv_Conv_output_0", "_model_4_m_m_1_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_m_m_1_cv2_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001696667750366f, .offset= -1641}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_1_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_1_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_m_m_1_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_m_m_1_Add */ const char* inputs__model_4_m_m_1_Add[] = { "_model_4_m_m_0_Add_output_0", "_model_4_m_m_1_cv2_act_Mul_output_0" }; uint32_t dimensions__model_4_m_m_1_Add_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_m_m_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_m_m_1_Add_output_0", .type= 
QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001752743846737f, .offset= -4766}}}, .rank= 4, .dimensions=dimensions__model_4_m_m_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_m_m_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_m_m_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_m_m_1_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_4_cv2_conv_weight[] = {1, 1, 128, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0116010522469878f, .offset= -128}}}, .rank= 4, .dimensions=dimensions_model_4_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_cv2_conv_weight), .dataSize=BINLEN(model_4_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_cv2_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_4_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000020142599f, .offset= 
0}}}, .rank= 1, .dimensions=dimensions_model_4_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_cv2_conv_bias), .dataSize=BINLEN(model_4_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_cv2_conv_Conv */ uint32_t dimensions___model_4_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_4_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_4_cv2_conv_Conv_stride[] = {2}; uint32_t __model_4_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_4_cv2_conv_Conv[] = { "_model_3_act_Mul_output_0", "model_4_cv2_conv_weight", "model_4_cv2_conv_bias" }; uint32_t dimensions__model_4_cv2_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002892240299843f, .offset= -34072}}}, .rank= 4, .dimensions=dimensions__model_4_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_4_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_cv2_act_Sigmoid */ const char* inputs__model_4_cv2_act_Sigmoid[] = { "_model_4_cv2_conv_Conv_output_0" }; uint32_t 
dimensions__model_4_cv2_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_cv2_act_Mul */ const char* inputs__model_4_cv2_act_Mul[] = { "_model_4_cv2_conv_Conv_output_0", "_model_4_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_cv2_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_4_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001430895936210f, .offset= -1946}}}, .rank= 4, .dimensions=dimensions__model_4_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv2_act_Mul", // Node Name "qti.aisw", // Package Name 
"ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_Concat */ Qnn_Param_t params__model_4_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_4_Concat[] = { "_model_4_m_m_1_Add_output_0", "_model_4_cv2_act_Mul_output_0" }; uint32_t dimensions__model_4_Concat_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_4_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001752743846737f, .offset= -4766}}}, .rank= 4, .dimensions=dimensions__model_4_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_4_Concat, // Node Params 1, // Num Node Params inputs__model_4_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_4_cv3_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_4_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0054279789328575f, .offset= -130}}}, .rank= 4, .dimensions=dimensions_model_4_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_cv3_conv_weight), .dataSize=BINLEN(model_4_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_4_cv3_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_4_cv3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_4_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014665973f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_4_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_4_cv3_conv_bias), .dataSize=BINLEN(model_4_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_4_cv3_conv_Conv */ uint32_t dimensions___model_4_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_4_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_4_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_4_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_4_cv3_conv_Conv_stride[] = {2}; uint32_t __model_4_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_4_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=(uint8_t*)__model_4_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_4_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_4_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_4_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_4_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_4_cv3_conv_Conv[] = { "_model_4_Concat_output_0", "model_4_cv3_conv_weight", "model_4_cv3_conv_bias" }; uint32_t dimensions__model_4_cv3_conv_Conv_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_4_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002768624108285f, .offset= -35007}}}, .rank= 4, .dimensions=dimensions__model_4_cv3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_4_cv3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_4_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_4_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_cv3_act_Sigmoid */ const char* inputs__model_4_cv3_act_Sigmoid[] = { "_model_4_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_4_cv3_act_Sigmoid_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_4_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_4_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_4_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_4_cv3_act_Mul */ const char* inputs__model_4_cv3_act_Mul[] = { "_model_4_cv3_conv_Conv_output_0", 
"_model_4_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_4_cv3_act_Mul_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_4_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_4_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001331916428171f, .offset= -2091}}}, .rank= 4, .dimensions=dimensions__model_4_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_4_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_4_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_4_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_5_conv_weight[] = {3, 3, 128, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_5_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_5_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050536175258458f, .offset= -155}}}, .rank= 4, .dimensions=dimensions_model_5_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_5_conv_weight), .dataSize=BINLEN(model_5_conv_weight)}}}}} ), err); uint32_t dimensions_model_5_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_5_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_5_conv_bias", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000016302880f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_5_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_5_conv_bias), .dataSize=BINLEN(model_5_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_5_conv_Conv */ uint32_t dimensions___model_5_conv_Conv_dilation[] = {2}; uint32_t __model_5_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_5_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_5_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_5_conv_Conv_stride[] = {2}; uint32_t __model_5_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_5_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_5_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_5_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_5_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_5_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_5_conv_Conv_pad_amount, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_5_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_5_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_5_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_5_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_5_conv_Conv[] = { "_model_4_cv3_act_Mul_output_0", "model_5_conv_weight", "model_5_conv_bias" }; uint32_t dimensions__model_5_conv_Conv_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_5_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_5_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003233898023609f, .offset= -36020}}}, .rank= 4, .dimensions=dimensions__model_5_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_5_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_5_conv_Conv, // Node Params 4, // Num Node Params inputs__model_5_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_5_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING 
NODE FOR _model_5_act_Sigmoid */ const char* inputs__model_5_act_Sigmoid[] = { "_model_5_conv_Conv_output_0" }; uint32_t dimensions__model_5_act_Sigmoid_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_5_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_5_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_5_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_5_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_5_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_5_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_5_act_Mul */ const char* inputs__model_5_act_Mul[] = { "_model_5_conv_Conv_output_0", "_model_5_act_Sigmoid_output_0" }; uint32_t dimensions__model_5_act_Mul_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_5_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_5_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001498837809777f, .offset= -1858}}}, .rank= 4, .dimensions=dimensions__model_5_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_model_5_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_5_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_5_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_cv1_conv_weight[] = {1, 1, 256, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0047016693279147f, .offset= -105}}}, .rank= 4, .dimensions=dimensions_model_6_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_cv1_conv_weight), .dataSize=BINLEN(model_6_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000007227745f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_cv1_conv_bias), .dataSize=BINLEN(model_6_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_cv1_conv_Conv */ uint32_t dimensions___model_6_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_6_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; 
/* ----------------------------------------------------------------------------
 * NOTE(review): machine-generated QNN graph-builder code (qnn-onnx-converter
 * output). Scales, offsets and dims below come from quantization calibration —
 * do not hand-edit. This region wires the YOLOv5s "model.6" C3 block.
 *
 * _model_6_cv1_conv_Conv: Conv2d with stride {1,1}; its dilation/pad_amount
 * arrays are declared just before this chunk. Conv2d params are passed as
 * static UINT_32 tensors (dilation, pad_amount, stride) plus a "group" scalar.
 * Output is 16-bit affine-quantized (UFIXED_POINT_16, per-tensor scale/offset),
 * dims {1, 40, 40, 128} — presumably NHWC after the converter's
 * spatial-first axis reordering (perform_axes_to_spatial_first_order=True);
 * confirm against the converter docs before relying on layout.
 * The Conv output then feeds Sigmoid + ElementWiseMultiply on the same tensor,
 * i.e. the SiLU/Swish activation x * sigmoid(x) decomposed into two QNN ops.
 * ------------------------------------------------------------------------- */
uint32_t dimensions___model_6_cv1_conv_Conv_stride[] = {2}; uint32_t __model_6_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_6_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_cv1_conv_Conv[] = { "_model_5_act_Mul_output_0", "model_6_cv1_conv_weight", "model_6_cv1_conv_bias" }; uint32_t dimensions__model_6_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001798848970793f, .offset= -42571}}}, .rank= 4, .dimensions=dimensions__model_6_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_cv1_act_Sigmoid */ const char* inputs__model_6_cv1_act_Sigmoid[] = { "_model_6_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_6_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_cv1_act_Mul */ const char* inputs__model_6_cv1_act_Mul[] = { "_model_6_cv1_conv_Conv_output_0", "_model_6_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000662850216031f, .offset= -4201}}}, .rank= 4, .dimensions=dimensions__model_6_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_m_0_cv1_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"model_6_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0235136691480875f, .offset= -117}}}, .rank= 4, .dimensions=dimensions_model_6_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_0_cv1_conv_weight), .dataSize=BINLEN(model_6_m_0_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_m_0_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000019840789f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_0_cv1_conv_bias), .dataSize=BINLEN(model_6_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_6_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_6_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_6_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_6_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, 
.dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_m_m_0_cv1_conv_Conv[] = { "_model_6_cv1_act_Mul_output_0", "model_6_m_0_cv1_conv_weight", "model_6_m_0_cv1_conv_bias" }; uint32_t dimensions__model_6_m_m_0_cv1_conv_Conv_output_0[] = 
{1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003633314627223f, .offset= -24381}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_6_m_m_0_cv1_act_Sigmoid[] = { "_model_6_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_6_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_cv1_act_Sigmoid", // 
Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_0_cv1_act_Mul */ const char* inputs__model_6_m_m_0_cv1_act_Mul[] = { "_model_6_m_m_0_cv1_conv_Conv_output_0", "_model_6_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_m_m_0_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002324106462765f, .offset= -1198}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_m_0_cv2_conv_weight[] = {3, 3, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
/* NOTE(review): generated code — bottleneck m.0, cv2 (3x3 conv, weight dims
 * {3, 3, 128, 128}, pad_amount {1,1,1,1} i.e. "same" padding, stride {1,1},
 * group=1) followed by its Sigmoid + ElementWiseMultiply SiLU pair, then
 * _model_6_m_m_0_Add — the bottleneck residual: it adds the C3 cv1 activation
 * (_model_6_cv1_act_Mul_output_0) to cv2's activation output. The tail of this
 * section also registers the static weight/bias tensors for the next
 * bottleneck's cv1 (model_6_m_1_cv1). All scales/offsets are calibration
 * output — do not hand-edit. */
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029645801987499f, .offset= -174}}}, .rank= 4, .dimensions=dimensions_model_6_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_0_cv2_conv_weight), .dataSize=BINLEN(model_6_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_m_0_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000007720334f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_0_cv2_conv_bias), .dataSize=BINLEN(model_6_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_6_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_6_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_6_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_6_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions___model_6_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_0_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_m_m_0_cv2_conv_Conv[] = { "_model_6_m_m_0_cv1_act_Mul_output_0", "model_6_m_0_cv2_conv_weight", "model_6_m_0_cv2_conv_bias" }; uint32_t dimensions__model_6_m_m_0_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_cv2_conv_Conv_output_0", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001850519765867f, .offset= -35287}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_6_m_m_0_cv2_act_Sigmoid[] = { "_model_6_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_6_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num 
Input Tensor Names outputs__model_6_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_0_cv2_act_Mul */ const char* inputs__model_6_m_m_0_cv2_act_Mul[] = { "_model_6_m_m_0_cv2_conv_Conv_output_0", "_model_6_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_m_m_0_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000893451506272f, .offset= -3117}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_0_Add */ const char* inputs__model_6_m_m_0_Add[] = { "_model_6_cv1_act_Mul_output_0", "_model_6_m_m_0_cv2_act_Mul_output_0" }; uint32_t dimensions__model_6_m_m_0_Add_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0001374758576276f, .offset= -4051}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_0_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_0_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_m_1_cv1_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_1_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_1_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0150019684806466f, .offset= -137}}}, .rank= 4, .dimensions=dimensions_model_6_m_1_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_1_cv1_conv_weight), .dataSize=BINLEN(model_6_m_1_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_m_1_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_1_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_1_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000017492754f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_m_1_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_1_cv1_conv_bias), 
/* NOTE(review): generated code — bottleneck m.1, cv1 (1x1 conv, pad_amount
 * {0,0,0,0}, stride {1,1}, dilation {1,1}, group=1) consuming the previous
 * bottleneck's residual output _model_6_m_m_0_Add_output_0, followed by its
 * Sigmoid + ElementWiseMultiply SiLU pair, and finally the static weight/bias
 * tensors for m.1 cv2 (3x3, {3, 3, 128, 128}). Param tensors here are
 * metadata-only (UNDEFINED quantization, zero scale) — they carry conv
 * attributes, not activations. */
.dataSize=BINLEN(model_6_m_1_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_m_m_1_cv1_conv_Conv */ uint32_t dimensions___model_6_m_m_1_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_6_m_m_1_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_m_m_1_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_m_m_1_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_6_m_m_1_cv1_conv_Conv_stride[] = {2}; uint32_t __model_6_m_m_1_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_m_m_1_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_1_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_1_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_1_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_1_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_m_m_1_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_1_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"__model_6_m_m_1_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_1_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_1_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_m_m_1_cv1_conv_Conv[] = { "_model_6_m_m_0_Add_output_0", "model_6_m_1_cv1_conv_weight", "model_6_m_1_cv1_conv_bias" }; uint32_t dimensions__model_6_m_m_1_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003877567069139f, .offset= -27028}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_m_m_1_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_m_m_1_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_m_m_1_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_1_cv1_act_Sigmoid */ const char* inputs__model_6_m_m_1_cv1_act_Sigmoid[] = { 
"_model_6_m_m_1_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_6_m_m_1_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_1_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_m_m_1_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_1_cv1_act_Mul */ const char* inputs__model_6_m_m_1_cv1_act_Mul[] = { "_model_6_m_m_1_cv1_conv_Conv_output_0", "_model_6_m_m_1_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_m_m_1_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002320844505448f, .offset= -1200}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_1_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_1_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_m_1_cv2_conv_weight[] = {3, 3, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_1_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_1_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046471692621708f, .offset= -139}}}, .rank= 4, .dimensions=dimensions_model_6_m_1_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_1_cv2_conv_weight), .dataSize=BINLEN(model_6_m_1_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_m_1_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_1_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_1_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000007942608f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_m_1_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_1_cv2_conv_bias), .dataSize=BINLEN(model_6_m_1_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_m_m_1_cv2_conv_Conv */ uint32_t dimensions___model_6_m_m_1_cv2_conv_Conv_dilation[] = {2}; uint32_t 
/* NOTE(review): generated code — bottleneck m.1, cv2 (3x3 conv, pad_amount
 * {1,1,1,1}, stride {1,1}, group=1) + Sigmoid/Multiply SiLU pair, then
 * _model_6_m_m_1_Add — the residual add of _model_6_m_m_0_Add_output_0 with
 * cv2's activation output. The tail registers the m.2 cv1 static weight/bias
 * tensors and begins the m.2 cv1 Conv2d param list, which continues past this
 * chunk. All quantization constants are calibration output — do not hand-edit. */
__model_6_m_m_1_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_m_m_1_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_m_m_1_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_6_m_m_1_cv2_conv_Conv_stride[] = {2}; uint32_t __model_6_m_m_1_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_m_m_1_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_1_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_1_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_1_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_1_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_m_m_1_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_1_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_1_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_1_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_1_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_m_m_1_cv2_conv_Conv[] = { "_model_6_m_m_1_cv1_act_Mul_output_0", "model_6_m_1_cv2_conv_weight", "model_6_m_1_cv2_conv_bias" }; uint32_t dimensions__model_6_m_m_1_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002434704947518f, .offset= -34428}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_m_m_1_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_m_m_1_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_m_m_1_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_1_cv2_act_Sigmoid */ const char* inputs__model_6_m_m_1_cv2_act_Sigmoid[] = { "_model_6_m_m_1_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_6_m_m_1_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { 
.version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_1_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_m_m_1_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_1_cv2_act_Mul */ const char* inputs__model_6_m_m_1_cv2_act_Mul[] = { "_model_6_m_m_1_cv2_conv_Conv_output_0", "_model_6_m_m_1_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_m_m_1_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001197553428938f, .offset= -2325}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node 
Params 0, // Num Node Params inputs__model_6_m_m_1_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_1_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_1_Add */ const char* inputs__model_6_m_m_1_Add[] = { "_model_6_m_m_0_Add_output_0", "_model_6_m_m_1_cv2_act_Mul_output_0" }; uint32_t dimensions__model_6_m_m_1_Add_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_1_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_1_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002009185118368f, .offset= -4158}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_1_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_1_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_1_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_1_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_m_2_cv1_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_2_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_2_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0079269921407104f, .offset= -138}}}, .rank= 4, .dimensions=dimensions_model_6_m_2_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=BINVARSTART(model_6_m_2_cv1_conv_weight), .dataSize=BINLEN(model_6_m_2_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_m_2_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_2_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_2_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000012001622f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_m_2_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_2_cv1_conv_bias), .dataSize=BINLEN(model_6_m_2_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_m_m_2_cv1_conv_Conv */ uint32_t dimensions___model_6_m_m_2_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_6_m_m_2_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_m_m_2_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_m_m_2_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_6_m_m_2_cv1_conv_Conv_stride[] = {2}; uint32_t __model_6_m_m_2_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_m_m_2_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_2_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_2_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_2_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_2_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_m_m_2_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_2_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_2_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_2_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_2_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_m_m_2_cv1_conv_Conv[] = { "_model_6_m_m_1_Add_output_0", "model_6_m_2_cv1_conv_weight", "model_6_m_2_cv1_conv_bias" }; uint32_t dimensions__model_6_m_m_2_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003164758381899f, .offset= -35131}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_2_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_2_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_m_m_2_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_m_m_2_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_m_m_2_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_2_cv1_act_Sigmoid */ const char* inputs__model_6_m_m_2_cv1_act_Sigmoid[] = { "_model_6_m_m_2_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_6_m_m_2_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_2_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_2_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_2_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_m_m_2_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_2_cv1_act_Mul */ const 
char* inputs__model_6_m_m_2_cv1_act_Mul[] = { "_model_6_m_m_2_cv1_conv_Conv_output_0", "_model_6_m_m_2_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_m_m_2_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001510637666797f, .offset= -1843}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_2_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_2_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_2_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_2_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_m_2_cv2_conv_weight[] = {3, 3, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_2_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_2_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0091705499216914f, .offset= -138}}}, .rank= 4, .dimensions=dimensions_model_6_m_2_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_2_cv2_conv_weight), .dataSize=BINLEN(model_6_m_2_cv2_conv_weight)}}}}} ), err); uint32_t 
dimensions_model_6_m_2_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_m_2_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_m_2_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014950098f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_m_2_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_m_2_cv2_conv_bias), .dataSize=BINLEN(model_6_m_2_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_m_m_2_cv2_conv_Conv */ uint32_t dimensions___model_6_m_m_2_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_6_m_m_2_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_m_m_2_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_m_m_2_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_6_m_m_2_cv2_conv_Conv_stride[] = {2}; uint32_t __model_6_m_m_2_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_m_m_2_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_2_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_2_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_2_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"__model_6_m_m_2_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_m_m_2_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_2_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_m_m_2_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_m_m_2_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_m_m_2_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_m_m_2_cv2_conv_Conv[] = { "_model_6_m_m_2_cv1_act_Mul_output_0", "model_6_m_2_cv2_conv_weight", "model_6_m_2_cv2_conv_bias" }; uint32_t dimensions__model_6_m_m_2_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004538083449006f, .offset= -37425}}}, .rank= 4, 
.dimensions=dimensions__model_6_m_m_2_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_2_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_m_m_2_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_m_m_2_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_m_m_2_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_2_cv2_act_Sigmoid */ const char* inputs__model_6_m_m_2_cv2_act_Sigmoid[] = { "_model_6_m_m_2_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_6_m_m_2_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_2_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_2_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_2_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_m_m_2_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_2_cv2_act_Mul */ const char* inputs__model_6_m_m_2_cv2_act_Mul[] = { "_model_6_m_m_2_cv2_conv_Conv_output_0", 
"_model_6_m_m_2_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_m_m_2_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001989014854189f, .offset= -1400}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_2_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_m_m_2_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_2_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_2_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_m_m_2_Add */ const char* inputs__model_6_m_m_2_Add[] = { "_model_6_m_m_1_Add_output_0", "_model_6_m_m_2_cv2_act_Mul_output_0" }; uint32_t dimensions__model_6_m_m_2_Add_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_m_m_2_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_m_m_2_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002046370209428f, .offset= -5443}}}, .rank= 4, .dimensions=dimensions__model_6_m_m_2_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_model_6_m_m_2_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_m_m_2_Add, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_m_m_2_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_cv2_conv_weight[] = {1, 1, 256, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0089017264544964f, .offset= -159}}}, .rank= 4, .dimensions=dimensions_model_6_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_cv2_conv_weight), .dataSize=BINLEN(model_6_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_6_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008372794f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_cv2_conv_bias), .dataSize=BINLEN(model_6_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_cv2_conv_Conv */ uint32_t dimensions___model_6_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_6_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 
0}; uint32_t dimensions___model_6_cv2_conv_Conv_stride[] = {2}; uint32_t __model_6_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_6_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_cv2_conv_Conv[] = { "_model_5_act_Mul_output_0", "model_6_cv2_conv_weight", "model_6_cv2_conv_bias" }; uint32_t dimensions__model_6_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003917470749002f, .offset= -35884}}}, .rank= 4, .dimensions=dimensions__model_6_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_6_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_cv2_act_Sigmoid */ const char* inputs__model_6_cv2_act_Sigmoid[] = { "_model_6_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_6_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_cv2_act_Mul */ const char* inputs__model_6_cv2_act_Mul[] = { "_model_6_cv2_conv_Conv_output_0", "_model_6_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_6_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001814908173401f, .offset= -1534}}}, .rank= 4, .dimensions=dimensions__model_6_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_Concat */ Qnn_Param_t params__model_6_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* 
inputs__model_6_Concat[] = { "_model_6_m_m_2_Add_output_0", "_model_6_cv2_act_Mul_output_0" }; uint32_t dimensions__model_6_Concat_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_6_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002046370209428f, .offset= -5443}}}, .rank= 4, .dimensions=dimensions__model_6_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_6_Concat, // Node Params 1, // Num Node Params inputs__model_6_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_6_cv3_conv_weight[] = {1, 1, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_6_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_6_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0045033744536340f, .offset= -110}}}, .rank= 4, .dimensions=dimensions_model_6_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_cv3_conv_weight), .dataSize=BINLEN(model_6_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_6_cv3_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_6_cv3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { 
.id=0, .name= "model_6_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008321650f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_6_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_6_cv3_conv_bias), .dataSize=BINLEN(model_6_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_6_cv3_conv_Conv */ uint32_t dimensions___model_6_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_6_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_6_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_6_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_6_cv3_conv_Conv_stride[] = {2}; uint32_t __model_6_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_6_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 
0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_6_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_6_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_6_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_6_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_6_cv3_conv_Conv[] = { "_model_6_Concat_output_0", "model_6_cv3_conv_weight", "model_6_cv3_conv_bias" }; uint32_t dimensions__model_6_cv3_conv_Conv_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_6_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002908524184022f, .offset= -33943}}}, .rank= 4, .dimensions=dimensions__model_6_cv3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_6_cv3_conv_Conv, // Node Params 4, // Num Node Params 
inputs__model_6_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_6_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_cv3_act_Sigmoid */ const char* inputs__model_6_cv3_act_Sigmoid[] = { "_model_6_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_6_cv3_act_Sigmoid_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_6_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_6_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_6_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_6_cv3_act_Mul */ const char* inputs__model_6_cv3_act_Mul[] = { "_model_6_cv3_conv_Conv_output_0", "_model_6_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_6_cv3_act_Mul_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_6_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_6_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001444447843824f, .offset= 
-1928}}}, .rank= 4, .dimensions=dimensions__model_6_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_6_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_6_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_6_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_7_conv_weight[] = {3, 3, 256, 512}; VALIDATE(cutoff_yolov5s.addTensor("model_7_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_7_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0026594484224916f, .offset= -98}}}, .rank= 4, .dimensions=dimensions_model_7_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_7_conv_weight), .dataSize=BINLEN(model_7_conv_weight)}}}}} ), err); uint32_t dimensions_model_7_conv_bias[] = {512}; VALIDATE(cutoff_yolov5s.addTensor("model_7_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_7_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000012945011f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_7_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_7_conv_bias), .dataSize=BINLEN(model_7_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_7_conv_Conv */ uint32_t 
dimensions___model_7_conv_Conv_dilation[] = {2}; uint32_t __model_7_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_7_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_7_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_7_conv_Conv_stride[] = {2}; uint32_t __model_7_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_7_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_7_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_7_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_7_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_7_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_7_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_7_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_7_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 
0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_7_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_7_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_7_conv_Conv[] = { "_model_6_cv3_act_Mul_output_0", "model_7_conv_weight", "model_7_conv_bias" }; uint32_t dimensions__model_7_conv_Conv_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_7_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_7_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002900219406001f, .offset= -34760}}}, .rank= 4, .dimensions=dimensions__model_7_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_7_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_7_conv_Conv, // Node Params 4, // Num Node Params inputs__model_7_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_7_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_7_act_Sigmoid */ const char* inputs__model_7_act_Sigmoid[] = { "_model_7_conv_Conv_output_0" }; uint32_t dimensions__model_7_act_Sigmoid_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_7_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_7_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_7_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_7_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_7_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_7_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_7_act_Mul */ const char* inputs__model_7_act_Mul[] = { "_model_7_conv_Conv_output_0", "_model_7_act_Sigmoid_output_0" }; uint32_t dimensions__model_7_act_Mul_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_7_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_7_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001404221256962f, .offset= -1983}}}, .rank= 4, .dimensions=dimensions__model_7_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_7_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_7_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_7_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_8_cv1_conv_weight[] = {1, 1, 512, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"model_8_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0039452207274735f, .offset= -120}}}, .rank= 4, .dimensions=dimensions_model_8_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_cv1_conv_weight), .dataSize=BINLEN(model_8_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_8_cv1_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000010736048f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_8_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_cv1_conv_bias), .dataSize=BINLEN(model_8_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_8_cv1_conv_Conv */ uint32_t dimensions___model_8_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_8_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_8_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_8_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_8_cv1_conv_Conv_stride[] = {2}; uint32_t __model_8_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_8_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_8_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_8_cv1_conv_Conv[] = { "_model_7_act_Mul_output_0", "model_8_cv1_conv_weight", "model_8_cv1_conv_bias" }; uint32_t dimensions__model_8_cv1_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"_model_8_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002614156692289f, .offset= -33913}}}, .rank= 4, .dimensions=dimensions__model_8_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_8_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_8_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_8_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_cv1_act_Sigmoid */ const char* inputs__model_8_cv1_act_Sigmoid[] = { "_model_8_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_8_cv1_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_8_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_8_cv1_act_Sigmoid, 
// Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_cv1_act_Mul */ const char* inputs__model_8_cv1_act_Mul[] = { "_model_8_cv1_conv_Conv_output_0", "_model_8_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_8_cv1_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001303543249378f, .offset= -2136}}}, .rank= 4, .dimensions=dimensions__model_8_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_8_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_8_m_0_cv1_conv_weight[] = {1, 1, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0230620428919792f, .offset= -147}}}, .rank= 4, .dimensions=dimensions_model_8_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_m_0_cv1_conv_weight), .dataSize=BINLEN(model_8_m_0_cv1_conv_weight)}}}}} ), err); 
uint32_t dimensions_model_8_m_0_cv1_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000031805067f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_8_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_m_0_cv1_conv_bias), .dataSize=BINLEN(model_8_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_8_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_8_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_8_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_8_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_8_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_8_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_8_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_8_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"__model_8_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_8_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_8_m_m_0_cv1_conv_Conv[] = { "_model_8_cv1_act_Mul_output_0", "model_8_m_0_cv1_conv_weight", "model_8_m_0_cv1_conv_bias" }; uint32_t dimensions__model_8_m_m_0_cv1_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004623595159501f, .offset= -30654}}}, .rank= 4, 
.dimensions=dimensions__model_8_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_8_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_8_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_8_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_8_m_m_0_cv1_act_Sigmoid[] = { "_model_8_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_8_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_8_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_8_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_m_m_0_cv1_act_Mul */ const char* inputs__model_8_m_m_0_cv1_act_Mul[] = { "_model_8_m_m_0_cv1_conv_Conv_output_0", 
"_model_8_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_8_m_m_0_cv1_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002503422438167f, .offset= -1112}}}, .rank= 4, .dimensions=dimensions__model_8_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_8_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_8_m_0_cv2_conv_weight[] = {3, 3, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0035108458250761f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_model_8_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_m_0_cv2_conv_weight), .dataSize=BINLEN(model_8_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_8_m_0_cv2_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_m_0_cv2_conv_bias", // Node Name 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014267478f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_8_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_m_0_cv2_conv_bias), .dataSize=BINLEN(model_8_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_8_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_8_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_8_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_8_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_8_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_8_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_8_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_8_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_m_m_0_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_8_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_8_m_m_0_cv2_conv_Conv[] = { "_model_8_m_m_0_cv1_act_Mul_output_0", "model_8_m_0_cv2_conv_weight", "model_8_m_0_cv2_conv_bias" }; uint32_t dimensions__model_8_m_m_0_cv2_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004297832492739f, .offset= -29543}}}, .rank= 4, .dimensions=dimensions__model_8_m_m_0_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_8_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_8_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_8_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_8_m_m_0_cv2_act_Sigmoid[] = { "_model_8_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_8_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_8_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_8_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_m_m_0_cv2_act_Mul */ const char* inputs__model_8_m_m_0_cv2_act_Mul[] = { "_model_8_m_m_0_cv2_conv_Conv_output_0", "_model_8_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_8_m_m_0_cv2_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_cv2_act_Mul[] = { 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002402898389846f, .offset= -1159}}}, .rank= 4, .dimensions=dimensions__model_8_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_8_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_m_m_0_Add */ const char* inputs__model_8_m_m_0_Add[] = { "_model_8_cv1_act_Mul_output_0", "_model_8_m_m_0_cv2_act_Mul_output_0" }; uint32_t dimensions__model_8_m_m_0_Add_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_m_m_0_Add[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_m_m_0_Add_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002416809584247f, .offset= -2304}}}, .rank= 4, .dimensions=dimensions__model_8_m_m_0_Add_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_m_m_0_Add", // Node Name "qti.aisw", // Package Name "ElementWiseAdd", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_8_m_m_0_Add, // 
Input Tensor Names 2, // Num Input Tensor Names outputs__model_8_m_m_0_Add, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_8_cv2_conv_weight[] = {1, 1, 512, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0044571533799171f, .offset= -122}}}, .rank= 4, .dimensions=dimensions_model_8_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_cv2_conv_weight), .dataSize=BINLEN(model_8_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_8_cv2_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_8_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_8_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000006893144f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_8_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_8_cv2_conv_bias), .dataSize=BINLEN(model_8_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_8_cv2_conv_Conv */ uint32_t dimensions___model_8_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_8_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_8_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_8_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_8_cv2_conv_Conv_stride[] = {2}; uint32_t __model_8_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_8_cv2_conv_Conv[] = { 
{.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_8_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_8_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_8_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_8_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const 
char* inputs__model_8_cv2_conv_Conv[] = { "_model_7_act_Mul_output_0", "model_8_cv2_conv_weight", "model_8_cv2_conv_bias" }; uint32_t dimensions__model_8_cv2_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003602156066336f, .offset= -31266}}}, .rank= 4, .dimensions=dimensions__model_8_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_8_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_8_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_8_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_8_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_8_cv2_act_Sigmoid */ const char* inputs__model_8_cv2_act_Sigmoid[] = { "_model_8_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_8_cv2_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_8_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_8_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_8_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
/* Sigmoid half of model.8.cv2's SiLU activation. The converter lowers SiLU
   (x * sigmoid(x)) into a Sigmoid node followed by an ElementWiseMultiply
   that consumes both the conv output and the sigmoid output. */
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_8_cv2_act_Sigmoid", // Node Name
                                "qti.aisw", // Package Name
                                "Sigmoid", // Qnn Node Type
                                nullptr, // Node Params
                                0, // Num Node Params
                                inputs__model_8_cv2_act_Sigmoid, // Input Tensor Names
                                1, // Num Input Tensor Names
                                outputs__model_8_cv2_act_Sigmoid, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_8_cv2_act_Mul */
// Multiply half of the SiLU: conv output * sigmoid(conv output).
const char* inputs__model_8_cv2_act_Mul[] = {
    "_model_8_cv2_conv_Conv_output_0",
    "_model_8_cv2_act_Sigmoid_output_0"
};
// NHWC: 1 x 20 x 20 spatial, 256 channels.
uint32_t dimensions__model_8_cv2_act_Mul_output_0[] = {1, 20, 20, 256};
Qnn_Tensor_t outputs__model_8_cv2_act_Mul[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_8_cv2_act_Mul_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE, // internal tensor, not graph I/O
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16, // 16-bit activations (act_bw=16)
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0001926115219248f, .offset= -1446}}},
            .rank= 4,
            .dimensions=dimensions__model_8_cv2_act_Mul_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}} // runtime-allocated
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_8_cv2_act_Mul", // Node Name
                                "qti.aisw", // Package Name
                                "ElementWiseMultiply", // Qnn Node Type
                                nullptr, // Node Params
                                0, // Num Node Params
                                inputs__model_8_cv2_act_Mul, // Input Tensor Names
                                2, // Num Input Tensor Names
                                outputs__model_8_cv2_act_Mul, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_8_Concat */
// Concatenate the bottleneck branch (_model_8_m_m_0_Add) with the cv2 branch
// along axis 3 (NHWC channel axis): 256 + 256 -> 512 channels.
Qnn_Param_t params__model_8_Concat[] = {
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="axis",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}}
};
const char* inputs__model_8_Concat[] = {
    "_model_8_m_m_0_Add_output_0",
    "_model_8_cv2_act_Mul_output_0"
};
uint32_t dimensions__model_8_Concat_output_0[] = {1, 20, 20, 512};
Qnn_Tensor_t outputs__model_8_Concat[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_8_Concat_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            // Same scale/offset as the _model_8_m_m_0_Add input branch.
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0002416809584247f, .offset= -2304}}},
            .rank= 4,
            .dimensions=dimensions__model_8_Concat_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_8_Concat", // Node Name
                                "qti.aisw", // Package Name
                                "Concat", // Qnn Node Type
                                params__model_8_Concat, // Node Params
                                1, // Num Node Params
                                inputs__model_8_Concat, // Input Tensor Names
                                2, // Num Input Tensor Names
                                outputs__model_8_Concat, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

// Static weights for model.8.cv3: 1x1 conv, 512 in -> 512 out (last dim is
// output channels), 8-bit quantized (weight_bw=8); payload is embedded in the
// companion binary blob via BINVARSTART/BINLEN.
uint32_t dimensions_model_8_cv3_conv_weight[] = {1, 1, 512, 512};
VALIDATE(cutoff_yolov5s.addTensor("model_8_cv3_conv_weight", // Node Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "model_8_cv3_conv_weight",
            .type= QNN_TENSOR_TYPE_STATIC, // constant data, owned by the model binary
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0048706559464335f, .offset= -116}}},
            .rank= 4,
            .dimensions=dimensions_model_8_cv3_conv_weight,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(model_8_cv3_conv_weight),
                           .dataSize=BINLEN(model_8_cv3_conv_weight)}}}}}
    ), err);
// Bias: one int32 per output channel (bias_bw=32), symmetric encoding (offset 0).
uint32_t dimensions_model_8_cv3_conv_bias[] = {512};
VALIDATE(cutoff_yolov5s.addTensor("model_8_cv3_conv_bias", // Node Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "model_8_cv3_conv_bias",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_SFIXED_POINT_32,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000000007290017f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions_model_8_cv3_conv_bias,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(model_8_cv3_conv_bias),
                           .dataSize=BINLEN(model_8_cv3_conv_bias)}}}}}
    ), err);

/* ADDING NODE FOR _model_8_cv3_conv_Conv */
// 1x1 convolution over the 512-channel concat: stride 1, no padding,
// dilation 1, group=1. Param tensors are rank-1 (or rank-2 for pad_amount)
// uint32 arrays; dataSize is in bytes (2 x uint32 = 8, 4 x uint32 = 16).
uint32_t dimensions___model_8_cv3_conv_Conv_dilation[] = {2};
uint32_t __model_8_cv3_conv_Conv_dilation[] = {1, 1};
uint32_t dimensions___model_8_cv3_conv_Conv_pad_amount[] = {2, 2};
uint32_t __model_8_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions___model_8_cv3_conv_Conv_stride[] = {2};
uint32_t __model_8_cv3_conv_Conv_stride[] = {1, 1};
Qnn_Param_t params__model_8_cv3_conv_Conv[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_1,
         {.v1= {
             .id=0,
             .name= "__model_8_cv3_conv_Conv_dilation",
             .type= QNN_TENSOR_TYPE_STATIC,
             .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
             .dataType= QNN_DATATYPE_UINT_32,
             .quantizeParams= { QNN_DEFINITION_UNDEFINED, // not quantized data
                                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
             .rank= 1,
             .dimensions=dimensions___model_8_cv3_conv_Conv_dilation,
             .memType= QNN_TENSORMEMTYPE_RAW,
             {.clientBuf= { .data=(uint8_t*)__model_8_cv3_conv_Conv_dilation,
                            .dataSize=8}}}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_1,
         {.v1= {
             .id=0,
             .name= "__model_8_cv3_conv_Conv_pad_amount",
             .type= QNN_TENSOR_TYPE_STATIC,
             .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
             .dataType= QNN_DATATYPE_UINT_32,
             .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
             .rank= 2, // 2x2: (before, after) x (height, width)
             .dimensions=dimensions___model_8_cv3_conv_Conv_pad_amount,
             .memType= QNN_TENSORMEMTYPE_RAW,
             {.clientBuf= { .data=(uint8_t*)__model_8_cv3_conv_Conv_pad_amount,
                            .dataSize=16}}}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_1,
         {.v1= {
             .id=0,
             .name= "__model_8_cv3_conv_Conv_stride",
             .type= QNN_TENSOR_TYPE_STATIC,
             .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
             .dataType= QNN_DATATYPE_UINT_32,
             .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
             .rank= 1,
             .dimensions=dimensions___model_8_cv3_conv_Conv_stride,
             .memType= QNN_TENSORMEMTYPE_RAW,
             {.clientBuf= { .data=(uint8_t*)__model_8_cv3_conv_Conv_stride,
                            .dataSize=8}}}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
// Conv inputs follow QNN convention: activation, weight, bias.
const char* inputs__model_8_cv3_conv_Conv[] = {
    "_model_8_Concat_output_0",
    "model_8_cv3_conv_weight",
    "model_8_cv3_conv_bias"
};
uint32_t dimensions__model_8_cv3_conv_Conv_output_0[] = {1, 20, 20, 512};
Qnn_Tensor_t outputs__model_8_cv3_conv_Conv[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_8_cv3_conv_Conv_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0003550452238414f, .offset= -33578}}},
            .rank= 4,
            .dimensions=dimensions__model_8_cv3_conv_Conv_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_8_cv3_conv_Conv", // Node Name
                                "qti.aisw", // Package Name
                                "Conv2d", // Qnn Node Type
                                params__model_8_cv3_conv_Conv, // Node Params
                                4, // Num Node Params
                                inputs__model_8_cv3_conv_Conv, // Input Tensor Names
                                3, // Num Input Tensor Names
                                outputs__model_8_cv3_conv_Conv, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_8_cv3_act_Sigmoid */
// SiLU for model.8.cv3, sigmoid half.
const char* inputs__model_8_cv3_act_Sigmoid[] = { "_model_8_cv3_conv_Conv_output_0" };
uint32_t dimensions__model_8_cv3_act_Sigmoid_output_0[] = {1, 20, 20, 512};
Qnn_Tensor_t outputs__model_8_cv3_act_Sigmoid[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_8_cv3_act_Sigmoid_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            // scale 1/65536, offset 0: covers the sigmoid's [0, 1) range exactly.
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
            .rank= 4,
            .dimensions=dimensions__model_8_cv3_act_Sigmoid_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_8_cv3_act_Sigmoid", // Node Name
                                "qti.aisw", // Package Name
                                "Sigmoid", // Qnn Node Type
                                nullptr, // Node Params
                                0, // Num Node Params
                                inputs__model_8_cv3_act_Sigmoid, // Input Tensor Names
                                1, // Num Input Tensor Names
                                outputs__model_8_cv3_act_Sigmoid, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_8_cv3_act_Mul */
// SiLU for model.8.cv3, multiply half (node itself is added just below).
const char* inputs__model_8_cv3_act_Mul[] = {
    "_model_8_cv3_conv_Conv_output_0",
    "_model_8_cv3_act_Sigmoid_output_0"
};
uint32_t dimensions__model_8_cv3_act_Mul_output_0[] = {1, 20, 20, 512};
Qnn_Tensor_t outputs__model_8_cv3_act_Mul[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_8_cv3_act_Mul_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0001773808617145f, .offset= -1570}}},
            .rank= 4,
            .dimensions=dimensions__model_8_cv3_act_Mul_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
// Register the multiply half of model.8.cv3's SiLU (its input/output tensor
// arrays were declared immediately above).
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_8_cv3_act_Mul", // Node Name
                                "qti.aisw", // Package Name
                                "ElementWiseMultiply", // Qnn Node Type
                                nullptr, // Node Params
                                0, // Num Node Params
                                inputs__model_8_cv3_act_Mul, // Input Tensor Names
                                2, // Num Input Tensor Names
                                outputs__model_8_cv3_act_Mul, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

// Static weights for model.9.cv1 (entry conv of the SPPF block): 1x1 conv,
// 512 in -> 256 out, 8-bit quantized; payload embedded via BINVARSTART/BINLEN.
uint32_t dimensions_model_9_cv1_conv_weight[] = {1, 1, 512, 256};
VALIDATE(cutoff_yolov5s.addTensor("model_9_cv1_conv_weight", // Node Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "model_9_cv1_conv_weight",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_8, // weight_bw=8
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0043528368696570f, .offset= -129}}},
            .rank= 4,
            .dimensions=dimensions_model_9_cv1_conv_weight,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(model_9_cv1_conv_weight),
                           .dataSize=BINLEN(model_9_cv1_conv_weight)}}}}}
    ), err);
// Bias: one int32 per output channel (bias_bw=32), symmetric (offset 0).
uint32_t dimensions_model_9_cv1_conv_bias[] = {256};
VALIDATE(cutoff_yolov5s.addTensor("model_9_cv1_conv_bias", // Node Name
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "model_9_cv1_conv_bias",
            .type= QNN_TENSOR_TYPE_STATIC,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_SFIXED_POINT_32,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000000012689502f, .offset= 0}}},
            .rank= 1,
            .dimensions=dimensions_model_9_cv1_conv_bias,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=BINVARSTART(model_9_cv1_conv_bias),
                           .dataSize=BINLEN(model_9_cv1_conv_bias)}}}}}
    ), err);

/* ADDING NODE FOR _model_9_cv1_conv_Conv */
// 1x1 convolution: stride 1, no padding, dilation 1, group=1. The uint32
// param tensors carry byte sizes (2 x uint32 = 8, 4 x uint32 = 16).
uint32_t dimensions___model_9_cv1_conv_Conv_dilation[] = {2};
uint32_t __model_9_cv1_conv_Conv_dilation[] = {1, 1};
uint32_t dimensions___model_9_cv1_conv_Conv_pad_amount[] = {2, 2};
uint32_t __model_9_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0};
uint32_t dimensions___model_9_cv1_conv_Conv_stride[] = {2};
uint32_t __model_9_cv1_conv_Conv_stride[] = {1, 1};
Qnn_Param_t params__model_9_cv1_conv_Conv[] = {
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="dilation",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_1,
         {.v1= {
             .id=0,
             .name= "__model_9_cv1_conv_Conv_dilation",
             .type= QNN_TENSOR_TYPE_STATIC,
             .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
             .dataType= QNN_DATATYPE_UINT_32,
             .quantizeParams= { QNN_DEFINITION_UNDEFINED, // not quantized data
                                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
             .rank= 1,
             .dimensions=dimensions___model_9_cv1_conv_Conv_dilation,
             .memType= QNN_TENSORMEMTYPE_RAW,
             {.clientBuf= { .data=(uint8_t*)__model_9_cv1_conv_Conv_dilation,
                            .dataSize=8}}}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="pad_amount",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_1,
         {.v1= {
             .id=0,
             .name= "__model_9_cv1_conv_Conv_pad_amount",
             .type= QNN_TENSOR_TYPE_STATIC,
             .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
             .dataType= QNN_DATATYPE_UINT_32,
             .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
             .rank= 2, // 2x2: (before, after) x (height, width)
             .dimensions=dimensions___model_9_cv1_conv_Conv_pad_amount,
             .memType= QNN_TENSORMEMTYPE_RAW,
             {.clientBuf= { .data=(uint8_t*)__model_9_cv1_conv_Conv_pad_amount,
                            .dataSize=16}}}}}}},
    {.paramType=QNN_PARAMTYPE_TENSOR,
     .name="stride",
     {.tensorParam=(Qnn_Tensor_t) {
         .version= QNN_TENSOR_VERSION_1,
         {.v1= {
             .id=0,
             .name= "__model_9_cv1_conv_Conv_stride",
             .type= QNN_TENSOR_TYPE_STATIC,
             .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
             .dataType= QNN_DATATYPE_UINT_32,
             .quantizeParams= { QNN_DEFINITION_UNDEFINED,
                                QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}},
             .rank= 1,
             .dimensions=dimensions___model_9_cv1_conv_Conv_stride,
             .memType= QNN_TENSORMEMTYPE_RAW,
             {.clientBuf= { .data=(uint8_t*)__model_9_cv1_conv_Conv_stride,
                            .dataSize=8}}}}}}},
    {.paramType=QNN_PARAMTYPE_SCALAR,
     .name="group",
     {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}}
};
// Conv inputs: activation (model.8 output), then static weight and bias.
const char* inputs__model_9_cv1_conv_Conv[] = {
    "_model_8_cv3_act_Mul_output_0",
    "model_9_cv1_conv_weight",
    "model_9_cv1_conv_bias"
};
uint32_t dimensions__model_9_cv1_conv_Conv_output_0[] = {1, 20, 20, 256};
Qnn_Tensor_t outputs__model_9_cv1_conv_Conv[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_9_cv1_conv_Conv_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0002691168338060f, .offset= -33695}}},
            .rank= 4,
            .dimensions=dimensions__model_9_cv1_conv_Conv_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_9_cv1_conv_Conv", // Node Name
                                "qti.aisw", // Package Name
                                "Conv2d", // Qnn Node Type
                                params__model_9_cv1_conv_Conv, // Node Params
                                4, // Num Node Params
                                inputs__model_9_cv1_conv_Conv, // Input Tensor Names
                                3, // Num Input Tensor Names
                                outputs__model_9_cv1_conv_Conv, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_9_cv1_act_Sigmoid */
// SiLU for model.9.cv1, sigmoid half.
const char* inputs__model_9_cv1_act_Sigmoid[] = { "_model_9_cv1_conv_Conv_output_0" };
uint32_t dimensions__model_9_cv1_act_Sigmoid_output_0[] = {1, 20, 20, 256};
Qnn_Tensor_t outputs__model_9_cv1_act_Sigmoid[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_9_cv1_act_Sigmoid_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            // scale 1/65536, offset 0: covers the sigmoid's [0, 1) range exactly.
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}},
            .rank= 4,
            .dimensions=dimensions__model_9_cv1_act_Sigmoid_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_9_cv1_act_Sigmoid", // Node Name
                                "qti.aisw", // Package Name
                                "Sigmoid", // Qnn Node Type
                                nullptr, // Node Params
                                0, // Num Node Params
                                inputs__model_9_cv1_act_Sigmoid, // Input Tensor Names
                                1, // Num Input Tensor Names
                                outputs__model_9_cv1_act_Sigmoid, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_9_cv1_act_Mul */
// SiLU for model.9.cv1, multiply half. Its output feeds both the SPPF
// max-pool chain and the final concat, so the same encoding
// (scale 0.0001349747326458, offset -2063) recurs on those tensors.
const char* inputs__model_9_cv1_act_Mul[] = {
    "_model_9_cv1_conv_Conv_output_0",
    "_model_9_cv1_act_Sigmoid_output_0"
};
uint32_t dimensions__model_9_cv1_act_Mul_output_0[] = {1, 20, 20, 256};
Qnn_Tensor_t outputs__model_9_cv1_act_Mul[] = {
    (Qnn_Tensor_t) {
        .version= QNN_TENSOR_VERSION_1,
        {.v1= {
            .id=0,
            .name= "_model_9_cv1_act_Mul_output_0",
            .type= QNN_TENSOR_TYPE_NATIVE,
            .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
            .dataType= QNN_DATATYPE_UFIXED_POINT_16,
            .quantizeParams= { QNN_DEFINITION_DEFINED,
                               QNN_QUANTIZATION_ENCODING_SCALE_OFFSET,
                               {.scaleOffsetEncoding= {.scale= 0.0001349747326458f, .offset= -2063}}},
            .rank= 4,
            .dimensions=dimensions__model_9_cv1_act_Mul_output_0,
            .memType= QNN_TENSORMEMTYPE_RAW,
            {.clientBuf= { .data=nullptr, .dataSize=0}}}}}
};
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version
                                "_model_9_cv1_act_Mul", // Node Name
                                "qti.aisw", // Package Name
                                "ElementWiseMultiply", // Qnn Node Type
                                nullptr, // Node Params
                                0, // Num Node Params
                                inputs__model_9_cv1_act_Mul, // Input Tensor Names
                                2, // Num Input Tensor Names
                                outputs__model_9_cv1_act_Mul, // Output Tensors
                                1 // Num Output Tensors
                                ), err);

/* ADDING NODE FOR _model_9_m_MaxPool */
// First of the three chained 5x5/stride-1/pad-2 max-pools in the SPPF block.
uint32_t dimensions___model_9_m_MaxPool_filter_size[] = {2};
uint32_t __model_9_m_MaxPool_filter_size[] = {5, 5}; uint32_t dimensions___model_9_m_MaxPool_pad_amount[] = {2, 2}; uint32_t __model_9_m_MaxPool_pad_amount[] = {2, 2, 2, 2}; uint32_t dimensions___model_9_m_MaxPool_stride[] = {2}; uint32_t __model_9_m_MaxPool_stride[] = {1, 1}; Qnn_Param_t params__model_9_m_MaxPool[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="filter_size", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_MaxPool_filter_size", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_m_MaxPool_filter_size, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_MaxPool_filter_size, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_MaxPool_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_9_m_MaxPool_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_MaxPool_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_MaxPool_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions___model_9_m_MaxPool_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_MaxPool_stride, .dataSize=8}}}}}}} }; const char* inputs__model_9_m_MaxPool[] = { "_model_9_cv1_act_Mul_output_0" }; uint32_t dimensions__model_9_m_MaxPool_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_9_m_MaxPool[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_m_MaxPool_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001349747326458f, .offset= -2063}}}, .rank= 4, .dimensions=dimensions__model_9_m_MaxPool_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_m_MaxPool", // Node Name "qti.aisw", // Package Name "PoolMax2d", // Qnn Node Type params__model_9_m_MaxPool, // Node Params 3, // Num Node Params inputs__model_9_m_MaxPool, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_9_m_MaxPool, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_9_m_1_MaxPool */ uint32_t dimensions___model_9_m_1_MaxPool_filter_size[] = {2}; uint32_t __model_9_m_1_MaxPool_filter_size[] = {5, 5}; uint32_t dimensions___model_9_m_1_MaxPool_pad_amount[] = {2, 2}; uint32_t __model_9_m_1_MaxPool_pad_amount[] = {2, 2, 2, 2}; uint32_t dimensions___model_9_m_1_MaxPool_stride[] = {2}; uint32_t __model_9_m_1_MaxPool_stride[] = {1, 1}; Qnn_Param_t params__model_9_m_1_MaxPool[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="filter_size", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_1_MaxPool_filter_size", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_m_1_MaxPool_filter_size, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_1_MaxPool_filter_size, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_1_MaxPool_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_9_m_1_MaxPool_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_1_MaxPool_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_1_MaxPool_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_m_1_MaxPool_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_1_MaxPool_stride, .dataSize=8}}}}}}} }; const char* inputs__model_9_m_1_MaxPool[] = { "_model_9_m_MaxPool_output_0" }; uint32_t dimensions__model_9_m_1_MaxPool_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_9_m_1_MaxPool[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_m_1_MaxPool_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001349747326458f, .offset= -2063}}}, .rank= 4, .dimensions=dimensions__model_9_m_1_MaxPool_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_m_1_MaxPool", // Node Name "qti.aisw", // Package Name "PoolMax2d", // Qnn Node Type params__model_9_m_1_MaxPool, // Node Params 3, // Num Node Params inputs__model_9_m_1_MaxPool, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_9_m_1_MaxPool, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_9_m_2_MaxPool */ uint32_t dimensions___model_9_m_2_MaxPool_filter_size[] = {2}; uint32_t __model_9_m_2_MaxPool_filter_size[] = {5, 5}; uint32_t dimensions___model_9_m_2_MaxPool_pad_amount[] = {2, 2}; uint32_t __model_9_m_2_MaxPool_pad_amount[] = {2, 2, 2, 2}; uint32_t dimensions___model_9_m_2_MaxPool_stride[] = {2}; uint32_t __model_9_m_2_MaxPool_stride[] = {1, 1}; Qnn_Param_t params__model_9_m_2_MaxPool[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="filter_size", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_2_MaxPool_filter_size", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_m_2_MaxPool_filter_size, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_2_MaxPool_filter_size, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_2_MaxPool_pad_amount", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_9_m_2_MaxPool_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_2_MaxPool_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_m_2_MaxPool_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_m_2_MaxPool_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_m_2_MaxPool_stride, .dataSize=8}}}}}}} }; const char* inputs__model_9_m_2_MaxPool[] = { "_model_9_m_1_MaxPool_output_0" }; uint32_t dimensions__model_9_m_2_MaxPool_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_9_m_2_MaxPool[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_m_2_MaxPool_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001349747326458f, .offset= -2063}}}, .rank= 4, .dimensions=dimensions__model_9_m_2_MaxPool_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_m_2_MaxPool", // Node Name "qti.aisw", // Package Name "PoolMax2d", // Qnn Node Type params__model_9_m_2_MaxPool, // Node Params 3, 
// Num Node Params inputs__model_9_m_2_MaxPool, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_9_m_2_MaxPool, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_9_Concat */ Qnn_Param_t params__model_9_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_9_Concat[] = { "_model_9_cv1_act_Mul_output_0", "_model_9_m_MaxPool_output_0", "_model_9_m_1_MaxPool_output_0", "_model_9_m_2_MaxPool_output_0" }; uint32_t dimensions__model_9_Concat_output_0[] = {1, 20, 20, 1024}; Qnn_Tensor_t outputs__model_9_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001349747326458f, .offset= -2063}}}, .rank= 4, .dimensions=dimensions__model_9_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_9_Concat, // Node Params 1, // Num Node Params inputs__model_9_Concat, // Input Tensor Names 4, // Num Input Tensor Names outputs__model_9_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_9_cv2_conv_weight[] = {1, 1, 1024, 512}; VALIDATE(cutoff_yolov5s.addTensor("model_9_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_9_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0043584164232016f, .offset= -123}}}, .rank= 4, .dimensions=dimensions_model_9_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_9_cv2_conv_weight), .dataSize=BINLEN(model_9_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_9_cv2_conv_bias[] = {512}; VALIDATE(cutoff_yolov5s.addTensor("model_9_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_9_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000028260099f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_9_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_9_cv2_conv_bias), .dataSize=BINLEN(model_9_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_9_cv2_conv_Conv */ uint32_t dimensions___model_9_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_9_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_9_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_9_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_9_cv2_conv_Conv_stride[] = {2}; uint32_t __model_9_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_9_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, 
{.clientBuf= { .data=(uint8_t*)__model_9_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_9_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_9_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_9_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_9_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_9_cv2_conv_Conv[] = { "_model_9_Concat_output_0", "model_9_cv2_conv_weight", "model_9_cv2_conv_bias" }; uint32_t dimensions__model_9_cv2_conv_Conv_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_9_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002950036723632f, .offset= -30590}}}, .rank= 4, .dimensions=dimensions__model_9_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_9_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_9_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_9_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_9_cv2_act_Sigmoid */ const char* inputs__model_9_cv2_act_Sigmoid[] = { "_model_9_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_9_cv2_act_Sigmoid_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_9_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_9_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_9_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_9_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_9_cv2_act_Mul */ const char* inputs__model_9_cv2_act_Mul[] = { "_model_9_cv2_conv_Conv_output_0", 
"_model_9_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_9_cv2_act_Mul_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_9_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_9_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001615470682736f, .offset= -1724}}}, .rank= 4, .dimensions=dimensions__model_9_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_9_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_9_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_9_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_10_conv_weight[] = {1, 1, 512, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_10_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_10_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0118971290066838f, .offset= -138}}}, .rank= 4, .dimensions=dimensions_model_10_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_10_conv_weight), .dataSize=BINLEN(model_10_conv_weight)}}}}} ), err); uint32_t dimensions_model_10_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_10_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_10_conv_bias", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000019064554f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_10_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_10_conv_bias), .dataSize=BINLEN(model_10_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_10_conv_Conv */ uint32_t dimensions___model_10_conv_Conv_dilation[] = {2}; uint32_t __model_10_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_10_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_10_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_10_conv_Conv_stride[] = {2}; uint32_t __model_10_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_10_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_10_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_10_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_10_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_10_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_10_conv_Conv_pad_amount, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_10_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_10_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_10_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_10_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_10_conv_Conv[] = { "_model_9_cv2_act_Mul_output_0", "model_10_conv_weight", "model_10_conv_bias" }; uint32_t dimensions__model_10_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_10_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_10_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002983841404784f, .offset= -33282}}}, .rank= 4, .dimensions=dimensions__model_10_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_10_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_10_conv_Conv, // Node Params 4, // Num Node Params inputs__model_10_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_10_conv_Conv, // Output Tensors 1// Num Output 
Tensors ), err); /* ADDING NODE FOR _model_10_act_Sigmoid */ const char* inputs__model_10_act_Sigmoid[] = { "_model_10_conv_Conv_output_0" }; uint32_t dimensions__model_10_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_10_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_10_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_10_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_10_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_10_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_10_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_10_act_Mul */ const char* inputs__model_10_act_Mul[] = { "_model_10_conv_Conv_output_0", "_model_10_act_Sigmoid_output_0" }; uint32_t dimensions__model_10_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_10_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_10_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001510898873676f, .offset= -1843}}}, .rank= 4, .dimensions=dimensions__model_10_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_10_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_10_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_10_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_11_Resize */ Qnn_Param_t params__model_11_Resize[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="align_corners", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="half_pixel_centers", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__model_11_Resize[] = { "_model_10_act_Mul_output_0" }; uint32_t dimensions__model_11_Resize_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_11_Resize[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_11_Resize_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001510898873676f, .offset= -1843}}}, .rank= 4, .dimensions=dimensions__model_11_Resize_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_11_Resize", // Node Name "qti.aisw", // Package Name "ResizeNearestNeighbor", // Qnn Node Type params__model_11_Resize, // Node Params 2, // Num Node Params inputs__model_11_Resize, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_11_Resize, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_12_Concat */ Qnn_Param_t params__model_12_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) 
{QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_12_Concat[] = { "_model_11_Resize_output_0", "_model_6_cv3_act_Mul_output_0" }; uint32_t dimensions__model_12_Concat_output_0[] = {1, 40, 40, 512}; Qnn_Tensor_t outputs__model_12_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_12_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001510903530288f, .offset= -1843}}}, .rank= 4, .dimensions=dimensions__model_12_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_12_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_12_Concat, // Node Params 1, // Num Node Params inputs__model_12_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_12_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_13_cv1_conv_weight[] = {1, 1, 512, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0060090483166277f, .offset= -153}}}, .rank= 4, .dimensions=dimensions_model_13_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_cv1_conv_weight), .dataSize=BINLEN(model_13_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_13_cv1_conv_bias[] = {128}; 
VALIDATE(cutoff_yolov5s.addTensor("model_13_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000011940382f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_13_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_cv1_conv_bias), .dataSize=BINLEN(model_13_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_13_cv1_conv_Conv */ uint32_t dimensions___model_13_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_13_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_13_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_13_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_13_cv1_conv_Conv_stride[] = {2}; uint32_t __model_13_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_13_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= 
QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_13_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_13_cv1_conv_Conv[] = { "_model_12_Concat_output_0", "model_13_cv1_conv_weight", "model_13_cv1_conv_bias" }; uint32_t dimensions__model_13_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002254480496049f, .offset= -37137}}}, .rank= 4, .dimensions=dimensions__model_13_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version 
"_model_13_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_13_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_13_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_13_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_cv1_act_Sigmoid */ const char* inputs__model_13_cv1_act_Sigmoid[] = { "_model_13_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_13_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_13_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_13_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_cv1_act_Mul */ const char* inputs__model_13_cv1_act_Mul[] = { "_model_13_cv1_conv_Conv_output_0", "_model_13_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_13_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, 
.dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001017815811792f, .offset= -2736}}}, .rank= 4, .dimensions=dimensions__model_13_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_13_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_13_m_0_cv1_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0116098113358021f, .offset= -149}}}, .rank= 4, .dimensions=dimensions_model_13_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_m_0_cv1_conv_weight), .dataSize=BINLEN(model_13_m_0_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_13_m_0_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014135144f, .offset= 0}}}, .rank= 
1, .dimensions=dimensions_model_13_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_m_0_cv1_conv_bias), .dataSize=BINLEN(model_13_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_13_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_13_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_13_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_13_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_13_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_13_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_13_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_13_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_13_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_m_m_0_cv1_conv_Conv_pad_amount, 
.dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_13_m_m_0_cv1_conv_Conv[] = { "_model_13_cv1_act_Mul_output_0", "model_13_m_0_cv1_conv_weight", "model_13_m_0_cv1_conv_bias" }; uint32_t dimensions__model_13_m_m_0_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002854890772142f, .offset= -33382}}}, .rank= 4, .dimensions=dimensions__model_13_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_13_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_13_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names 
outputs__model_13_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_13_m_m_0_cv1_act_Sigmoid[] = { "_model_13_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_13_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_13_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_13_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_m_m_0_cv1_act_Mul */ const char* inputs__model_13_m_m_0_cv1_act_Mul[] = { "_model_13_m_m_0_cv1_conv_Conv_output_0", "_model_13_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_13_m_m_0_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0001443033834221f, .offset= -1930}}}, .rank= 4, .dimensions=dimensions__model_13_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_13_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_13_m_0_cv2_conv_weight[] = {3, 3, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0059649567119777f, .offset= -156}}}, .rank= 4, .dimensions=dimensions_model_13_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_m_0_cv2_conv_weight), .dataSize=BINLEN(model_13_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_13_m_0_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000012571884f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_13_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= 
{ .data=BINVARSTART(model_13_m_0_cv2_conv_bias), .dataSize=BINLEN(model_13_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_13_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_13_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_13_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_13_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_13_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_13_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_13_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_13_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_m_m_0_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_13_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_13_m_m_0_cv2_conv_Conv[] = { "_model_13_m_m_0_cv1_act_Mul_output_0", "model_13_m_0_cv2_conv_weight", "model_13_m_0_cv2_conv_bias" }; uint32_t dimensions__model_13_m_m_0_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002792468585540f, .offset= -34633}}}, .rank= 4, .dimensions=dimensions__model_13_m_m_0_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_13_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_13_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_13_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_m_m_0_cv2_act_Sigmoid */ 
const char* inputs__model_13_m_m_0_cv2_act_Sigmoid[] = { "_model_13_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_13_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_13_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_13_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_m_m_0_cv2_act_Mul */ const char* inputs__model_13_m_m_0_cv2_act_Mul[] = { "_model_13_m_m_0_cv2_conv_Conv_output_0", "_model_13_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_13_m_m_0_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001358992740279f, .offset= -2049}}}, .rank= 4, .dimensions=dimensions__model_13_m_m_0_cv2_act_Mul_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_13_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_13_cv2_conv_weight[] = {1, 1, 512, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0046525802463293f, .offset= -159}}}, .rank= 4, .dimensions=dimensions_model_13_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_cv2_conv_weight), .dataSize=BINLEN(model_13_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_13_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_13_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000007868370f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_13_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_cv2_conv_bias), .dataSize=BINLEN(model_13_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_13_cv2_conv_Conv */ uint32_t 
dimensions___model_13_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_13_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_13_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_13_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_13_cv2_conv_Conv_stride[] = {2}; uint32_t __model_13_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_13_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_13_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_13_cv2_conv_Conv[] = { "_model_12_Concat_output_0", "model_13_cv2_conv_weight", "model_13_cv2_conv_bias" }; uint32_t dimensions__model_13_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002520048874430f, .offset= -35854}}}, .rank= 4, .dimensions=dimensions__model_13_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_13_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_13_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_13_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_cv2_act_Sigmoid */ const char* inputs__model_13_cv2_act_Sigmoid[] = { "_model_13_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_13_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv2_act_Sigmoid_output_0", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_13_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_13_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_cv2_act_Mul */ const char* inputs__model_13_cv2_act_Mul[] = { "_model_13_cv2_conv_Conv_output_0", "_model_13_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_13_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_13_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001183200438390f, .offset= -2353}}}, .rank= 4, .dimensions=dimensions__model_13_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_13_cv2_act_Mul, // Output Tensors 1// 
Num Output Tensors ), err); /* ADDING NODE FOR _model_13_Concat */ Qnn_Param_t params__model_13_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_13_Concat[] = { "_model_13_m_m_0_cv2_act_Mul_output_0", "_model_13_cv2_act_Mul_output_0" }; uint32_t dimensions__model_13_Concat_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_13_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001358992740279f, .offset= -2049}}}, .rank= 4, .dimensions=dimensions__model_13_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_13_Concat, // Node Params 1, // Num Node Params inputs__model_13_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_13_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_13_cv3_conv_weight[] = {1, 1, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_13_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0095260469242930f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_model_13_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(model_13_cv3_conv_weight), .dataSize=BINLEN(model_13_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_13_cv3_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_13_cv3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_13_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000009828760f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_13_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_13_cv3_conv_bias), .dataSize=BINLEN(model_13_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_13_cv3_conv_Conv */ uint32_t dimensions___model_13_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_13_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_13_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_13_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_13_cv3_conv_Conv_stride[] = {2}; uint32_t __model_13_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_13_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, 
{.v1= { .id=0, .name= "__model_13_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_13_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_13_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_13_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_13_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_13_cv3_conv_Conv[] = { "_model_13_Concat_output_0", "model_13_cv3_conv_weight", "model_13_cv3_conv_bias" }; uint32_t dimensions__model_13_cv3_conv_Conv_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_13_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003396527899895f, .offset= -33296}}}, .rank= 4, .dimensions=dimensions__model_13_cv3_conv_Conv_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_13_cv3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_13_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_13_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_cv3_act_Sigmoid */ const char* inputs__model_13_cv3_act_Sigmoid[] = { "_model_13_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_13_cv3_act_Sigmoid_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_13_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_13_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_13_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_13_cv3_act_Mul */ const char* inputs__model_13_cv3_act_Mul[] = { "_model_13_cv3_conv_Conv_output_0", "_model_13_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_13_cv3_act_Mul_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_13_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= 
QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_13_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001713332603686f, .offset= -1625}}}, .rank= 4, .dimensions=dimensions__model_13_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_13_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_13_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_13_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_14_conv_weight[] = {1, 1, 256, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_14_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_14_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0050822757184505f, .offset= -129}}}, .rank= 4, .dimensions=dimensions_model_14_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_14_conv_weight), .dataSize=BINLEN(model_14_conv_weight)}}}}} ), err); uint32_t dimensions_model_14_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_14_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_14_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008891820f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_14_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_14_conv_bias), .dataSize=BINLEN(model_14_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_14_conv_Conv */ uint32_t dimensions___model_14_conv_Conv_dilation[] = {2}; uint32_t __model_14_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_14_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_14_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_14_conv_Conv_stride[] = {2}; uint32_t __model_14_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_14_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_14_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_14_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_14_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_14_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_14_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_14_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, 
.name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_14_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_14_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_14_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_14_conv_Conv[] = { "_model_13_cv3_act_Mul_output_0", "model_14_conv_weight", "model_14_conv_bias" }; uint32_t dimensions__model_14_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_14_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_14_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002538467815612f, .offset= -33538}}}, .rank= 4, .dimensions=dimensions__model_14_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_14_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_14_conv_Conv, // Node Params 4, // Num Node Params inputs__model_14_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_14_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_14_act_Sigmoid */ const char* inputs__model_14_act_Sigmoid[] = { "_model_14_conv_Conv_output_0" }; uint32_t 
dimensions__model_14_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_14_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_14_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_14_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_14_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_14_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_14_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_14_act_Mul */ const char* inputs__model_14_act_Mul[] = { "_model_14_conv_Conv_output_0", "_model_14_act_Sigmoid_output_0" }; uint32_t dimensions__model_14_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_14_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_14_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001281506265514f, .offset= -2173}}}, .rank= 4, .dimensions=dimensions__model_14_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_14_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // 
Node Params 0, // Num Node Params inputs__model_14_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_14_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_15_Resize */ Qnn_Param_t params__model_15_Resize[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="align_corners", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="half_pixel_centers", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}} }; const char* inputs__model_15_Resize[] = { "_model_14_act_Mul_output_0" }; uint32_t dimensions__model_15_Resize_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_15_Resize[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_15_Resize_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001281506265514f, .offset= -2173}}}, .rank= 4, .dimensions=dimensions__model_15_Resize_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_15_Resize", // Node Name "qti.aisw", // Package Name "ResizeNearestNeighbor", // Qnn Node Type params__model_15_Resize, // Node Params 2, // Num Node Params inputs__model_15_Resize, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_15_Resize, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_16_Concat */ Qnn_Param_t params__model_16_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_16_Concat[] = { "_model_15_Resize_output_0", "_model_4_cv3_act_Mul_output_0" }; uint32_t dimensions__model_16_Concat_output_0[] 
= {1, 80, 80, 256}; Qnn_Tensor_t outputs__model_16_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_16_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001331916428171f, .offset= -2091}}}, .rank= 4, .dimensions=dimensions__model_16_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_16_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_16_Concat, // Node Params 1, // Num Node Params inputs__model_16_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_16_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_17_cv1_conv_weight[] = {1, 1, 256, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0057397047057748f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_model_17_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_cv1_conv_weight), .dataSize=BINLEN(model_17_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_17_cv1_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, 
.dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000007800250f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_17_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_cv1_conv_bias), .dataSize=BINLEN(model_17_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_17_cv1_conv_Conv */ uint32_t dimensions___model_17_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_17_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_17_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_17_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_17_cv1_conv_Conv_stride[] = {2}; uint32_t __model_17_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_17_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_17_cv1_conv_Conv_pad_amount, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_17_cv1_conv_Conv[] = { "_model_16_Concat_output_0", "model_17_cv1_conv_weight", "model_17_cv1_conv_bias" }; uint32_t dimensions__model_17_cv1_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001690492354101f, .offset= -44472}}}, .rank= 4, .dimensions=dimensions__model_17_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_17_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_17_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names 
outputs__model_17_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_cv1_act_Sigmoid */ const char* inputs__model_17_cv1_act_Sigmoid[] = { "_model_17_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_17_cv1_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_17_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_17_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_cv1_act_Mul */ const char* inputs__model_17_cv1_act_Mul[] = { "_model_17_cv1_conv_Conv_output_0", "_model_17_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_17_cv1_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000570788906771f, .offset= -4879}}}, .rank= 4, 
.dimensions=dimensions__model_17_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_17_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_17_m_0_cv1_conv_weight[] = {1, 1, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0158278495073318f, .offset= -161}}}, .rank= 4, .dimensions=dimensions_model_17_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_m_0_cv1_conv_weight), .dataSize=BINLEN(model_17_m_0_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_17_m_0_cv1_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000014584740f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_17_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_m_0_cv1_conv_bias), .dataSize=BINLEN(model_17_m_0_cv1_conv_bias)}}}}} ), err); 
/* ADDING NODE FOR _model_17_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_17_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_17_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_17_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_17_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_17_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_17_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_17_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_17_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_m_m_0_cv1_conv_Conv_stride", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_17_m_m_0_cv1_conv_Conv[] = { "_model_17_cv1_act_Mul_output_0", "model_17_m_0_cv1_conv_weight", "model_17_m_0_cv1_conv_bias" }; uint32_t dimensions__model_17_m_m_0_cv1_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002027381415246f, .offset= -35402}}}, .rank= 4, .dimensions=dimensions__model_17_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_17_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_17_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_17_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_17_m_m_0_cv1_act_Sigmoid[] = { "_model_17_m_m_0_cv1_conv_Conv_output_0" 
}; uint32_t dimensions__model_17_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_17_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_17_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_m_m_0_cv1_act_Mul */ const char* inputs__model_17_m_m_0_cv1_act_Mul[] = { "_model_17_m_m_0_cv1_conv_Conv_output_0", "_model_17_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_17_m_m_0_cv1_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000972609050223f, .offset= -2863}}}, .rank= 4, .dimensions=dimensions__model_17_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_17_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_17_m_0_cv2_conv_weight[] = {3, 3, 64, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0113136311993003f, .offset= -140}}}, .rank= 4, .dimensions=dimensions_model_17_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_m_0_cv2_conv_weight), .dataSize=BINLEN(model_17_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_17_m_0_cv2_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000016451631f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_17_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_m_0_cv2_conv_bias), .dataSize=BINLEN(model_17_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_17_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_17_m_m_0_cv2_conv_Conv_dilation[] = {2}; 
uint32_t __model_17_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_17_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_17_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_17_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_17_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_17_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_m_m_0_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_17_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { 
QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_17_m_m_0_cv2_conv_Conv[] = { "_model_17_m_m_0_cv1_act_Mul_output_0", "model_17_m_0_cv2_conv_weight", "model_17_m_0_cv2_conv_bias" }; uint32_t dimensions__model_17_m_m_0_cv2_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0007259564008564f, .offset= -39357}}}, .rank= 4, .dimensions=dimensions__model_17_m_m_0_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_17_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_17_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_17_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_17_m_m_0_cv2_act_Sigmoid[] = { "_model_17_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_17_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t 
outputs__model_17_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_17_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_17_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_m_m_0_cv2_act_Mul */ const char* inputs__model_17_m_m_0_cv2_act_Mul[] = { "_model_17_m_m_0_cv2_conv_Conv_output_0", "_model_17_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_17_m_m_0_cv2_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002942295395769f, .offset= -946}}}, .rank= 4, .dimensions=dimensions__model_17_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_m_m_0_cv2_act_Mul", // Node Name 
"qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_17_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_17_cv2_conv_weight[] = {1, 1, 256, 64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0111798774451017f, .offset= -131}}}, .rank= 4, .dimensions=dimensions_model_17_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_cv2_conv_weight), .dataSize=BINLEN(model_17_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_17_cv2_conv_bias[] = {64}; VALIDATE(cutoff_yolov5s.addTensor("model_17_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000009659175f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_17_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_cv2_conv_bias), .dataSize=BINLEN(model_17_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_17_cv2_conv_Conv */ uint32_t dimensions___model_17_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_17_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_17_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_17_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 
0}; uint32_t dimensions___model_17_cv2_conv_Conv_stride[] = {2}; uint32_t __model_17_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_17_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_17_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_17_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_17_cv2_conv_Conv[] = { "_model_16_Concat_output_0", "model_17_cv2_conv_weight", "model_17_cv2_conv_bias" }; uint32_t dimensions__model_17_cv2_conv_Conv_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003119672182947f, .offset= -40629}}}, .rank= 4, .dimensions=dimensions__model_17_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_17_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_17_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_17_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_cv2_act_Sigmoid */ const char* inputs__model_17_cv2_act_Sigmoid[] = { "_model_17_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_17_cv2_act_Sigmoid_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= 
{.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_17_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_17_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_cv2_act_Mul */ const char* inputs__model_17_cv2_act_Mul[] = { "_model_17_cv2_conv_Conv_output_0", "_model_17_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_17_cv2_act_Mul_output_0[] = {1, 80, 80, 64}; Qnn_Tensor_t outputs__model_17_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001227608008776f, .offset= -2268}}}, .rank= 4, .dimensions=dimensions__model_17_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_17_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_Concat */ Qnn_Param_t params__model_17_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 
3}}}} }; const char* inputs__model_17_Concat[] = { "_model_17_m_m_0_cv2_act_Mul_output_0", "_model_17_cv2_act_Mul_output_0" }; uint32_t dimensions__model_17_Concat_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_17_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002942307619378f, .offset= -946}}}, .rank= 4, .dimensions=dimensions__model_17_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_17_Concat, // Node Params 1, // Num Node Params inputs__model_17_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_17_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_17_cv3_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_17_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0250751078128815f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_model_17_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_cv3_conv_weight), .dataSize=BINLEN(model_17_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_17_cv3_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_17_cv3_conv_bias", // Node Name 
(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_17_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000109620606f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_17_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_17_cv3_conv_bias), .dataSize=BINLEN(model_17_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_17_cv3_conv_Conv */ uint32_t dimensions___model_17_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_17_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_17_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_17_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_17_cv3_conv_Conv_stride[] = {2}; uint32_t __model_17_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_17_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, 
QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_17_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_17_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_17_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_17_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_17_cv3_conv_Conv[] = { "_model_17_Concat_output_0", "model_17_cv3_conv_weight", "model_17_cv3_conv_bias" }; uint32_t dimensions__model_17_cv3_conv_Conv_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_17_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009380719275214f, .offset= -33287}}}, .rank= 4, .dimensions=dimensions__model_17_cv3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn 
Node Type params__model_17_cv3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_17_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_17_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_cv3_act_Sigmoid */ const char* inputs__model_17_cv3_act_Sigmoid[] = { "_model_17_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_17_cv3_act_Sigmoid_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_17_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_17_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_17_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_17_cv3_act_Mul */ const char* inputs__model_17_cv3_act_Mul[] = { "_model_17_cv3_conv_Conv_output_0", "_model_17_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_17_cv3_act_Mul_output_0[] = {1, 80, 80, 128}; Qnn_Tensor_t outputs__model_17_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_17_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004658489488065f, .offset= -598}}}, .rank= 4, .dimensions=dimensions__model_17_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_17_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_17_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_17_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_18_conv_weight[] = {3, 3, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_18_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_18_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0017898838268593f, .offset= -121}}}, .rank= 4, .dimensions=dimensions_model_18_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_18_conv_weight), .dataSize=BINLEN(model_18_conv_weight)}}}}} ), err); uint32_t dimensions_model_18_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_18_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_18_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008778207f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_18_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_18_conv_bias), 
.dataSize=BINLEN(model_18_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_18_conv_Conv */ uint32_t dimensions___model_18_conv_Conv_dilation[] = {2}; uint32_t __model_18_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_18_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_18_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_18_conv_Conv_stride[] = {2}; uint32_t __model_18_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_18_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_18_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_18_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_18_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_18_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_18_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_18_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_18_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, 
.quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_18_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_18_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_18_conv_Conv[] = { "_model_17_cv3_act_Mul_output_0", "model_18_conv_weight", "model_18_conv_bias" }; uint32_t dimensions__model_18_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_18_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_18_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003007671621162f, .offset= -34221}}}, .rank= 4, .dimensions=dimensions__model_18_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_18_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_18_conv_Conv, // Node Params 4, // Num Node Params inputs__model_18_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_18_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_18_act_Sigmoid */ const char* inputs__model_18_act_Sigmoid[] = { "_model_18_conv_Conv_output_0" }; uint32_t dimensions__model_18_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_18_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_18_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_18_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_18_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_18_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_18_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_18_act_Mul */ const char* inputs__model_18_act_Mul[] = { "_model_18_conv_Conv_output_0", "_model_18_act_Sigmoid_output_0" }; uint32_t dimensions__model_18_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_18_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_18_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001479498459958f, .offset= -1882}}}, .rank= 4, .dimensions=dimensions__model_18_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_18_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_18_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_18_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_19_Concat */ Qnn_Param_t 
params__model_19_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_19_Concat[] = { "_model_18_act_Mul_output_0", "_model_14_act_Mul_output_0" }; uint32_t dimensions__model_19_Concat_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_19_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_19_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001479502971051f, .offset= -1882}}}, .rank= 4, .dimensions=dimensions__model_19_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_19_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_19_Concat, // Node Params 1, // Num Node Params inputs__model_19_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_19_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_20_cv1_conv_weight[] = {1, 1, 256, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0080735348165035f, .offset= -133}}}, .rank= 4, .dimensions=dimensions_model_20_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_cv1_conv_weight), .dataSize=BINLEN(model_20_cv1_conv_weight)}}}}} ), err); 
uint32_t dimensions_model_20_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000010931447f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_20_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_cv1_conv_bias), .dataSize=BINLEN(model_20_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_20_cv1_conv_Conv */ uint32_t dimensions___model_20_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_20_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_20_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_20_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_20_cv1_conv_Conv_stride[] = {2}; uint32_t __model_20_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_20_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_20_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_20_cv1_conv_Conv[] = { "_model_19_Concat_output_0", "model_20_cv1_conv_weight", "model_20_cv1_conv_bias" }; uint32_t dimensions__model_20_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003037437272724f, .offset= -34694}}}, .rank= 4, .dimensions=dimensions__model_20_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_20_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_20_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_20_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_cv1_act_Sigmoid */ const char* inputs__model_20_cv1_act_Sigmoid[] = { "_model_20_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_20_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_20_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_20_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_cv1_act_Mul */ const char* inputs__model_20_cv1_act_Mul[] = { "_model_20_cv1_conv_Conv_output_0", "_model_20_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_20_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv1_act_Mul_output_0", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001471814321121f, .offset= -1892}}}, .rank= 4, .dimensions=dimensions__model_20_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_20_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_20_m_0_cv1_conv_weight[] = {1, 1, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0074081192724407f, .offset= -145}}}, .rank= 4, .dimensions=dimensions_model_20_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_m_0_cv1_conv_weight), .dataSize=BINLEN(model_20_m_0_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_20_m_0_cv1_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008539692f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_20_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_m_0_cv1_conv_bias), .dataSize=BINLEN(model_20_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_20_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_20_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_20_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_20_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_20_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_20_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_20_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_20_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_20_m_m_0_cv1_conv_Conv_pad_amount, 
.memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_20_m_m_0_cv1_conv_Conv[] = { "_model_20_cv1_act_Mul_output_0", "model_20_m_0_cv1_conv_weight", "model_20_m_0_cv1_conv_bias" }; uint32_t dimensions__model_20_m_m_0_cv1_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002761994255707f, .offset= -35566}}}, .rank= 4, .dimensions=dimensions__model_20_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_20_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params 
inputs__model_20_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_20_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_20_m_m_0_cv1_act_Sigmoid[] = { "_model_20_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_20_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_20_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_20_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_m_m_0_cv1_act_Mul */ const char* inputs__model_20_m_m_0_cv1_act_Mul[] = { "_model_20_m_m_0_cv1_conv_Conv_output_0", "_model_20_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_20_m_m_0_cv1_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, 
.quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001305234764004f, .offset= -2133}}}, .rank= 4, .dimensions=dimensions__model_20_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_20_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_20_m_0_cv2_conv_weight[] = {3, 3, 128, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0092308809980750f, .offset= -126}}}, .rank= 4, .dimensions=dimensions_model_20_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_m_0_cv2_conv_weight), .dataSize=BINLEN(model_20_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_20_m_0_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000012405187f, .offset= 0}}}, .rank= 1, 
.dimensions=dimensions_model_20_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_m_0_cv2_conv_bias), .dataSize=BINLEN(model_20_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_20_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_20_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_20_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_20_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_20_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_20_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_20_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_20_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_m_m_0_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_20_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_m_m_0_cv2_conv_Conv_pad_amount, 
.dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_20_m_m_0_cv2_conv_Conv[] = { "_model_20_m_m_0_cv1_act_Mul_output_0", "model_20_m_0_cv2_conv_weight", "model_20_m_0_cv2_conv_bias" }; uint32_t dimensions__model_20_m_m_0_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004865182272624f, .offset= -43004}}}, .rank= 4, .dimensions=dimensions__model_20_m_m_0_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_20_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_20_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names 
outputs__model_20_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_20_m_m_0_cv2_act_Sigmoid[] = { "_model_20_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_20_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_20_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_20_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_m_m_0_cv2_act_Mul */ const char* inputs__model_20_m_m_0_cv2_act_Mul[] = { "_model_20_m_m_0_cv2_conv_Conv_output_0", "_model_20_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_20_m_m_0_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, 
{.scaleOffsetEncoding= {.scale= 0.0001715130201774f, .offset= -1624}}}, .rank= 4, .dimensions=dimensions__model_20_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_20_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_20_cv2_conv_weight[] = {1, 1, 256, 128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0094242803752422f, .offset= -187}}}, .rank= 4, .dimensions=dimensions_model_20_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_cv2_conv_weight), .dataSize=BINLEN(model_20_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_20_cv2_conv_bias[] = {128}; VALIDATE(cutoff_yolov5s.addTensor("model_20_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008100984f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_20_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=BINVARSTART(model_20_cv2_conv_bias), .dataSize=BINLEN(model_20_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_20_cv2_conv_Conv */ uint32_t dimensions___model_20_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_20_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_20_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_20_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_20_cv2_conv_Conv_stride[] = {2}; uint32_t __model_20_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_20_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_20_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv2_conv_Conv_stride", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_20_cv2_conv_Conv[] = { "_model_19_Concat_output_0", "model_20_cv2_conv_weight", "model_20_cv2_conv_bias" }; uint32_t dimensions__model_20_cv2_conv_Conv_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002825991250575f, .offset= -34741}}}, .rank= 4, .dimensions=dimensions__model_20_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_20_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_20_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_20_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_cv2_act_Sigmoid */ const char* inputs__model_20_cv2_act_Sigmoid[] = { "_model_20_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_20_cv2_act_Sigmoid_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t 
outputs__model_20_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_20_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_20_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_cv2_act_Mul */ const char* inputs__model_20_cv2_act_Mul[] = { "_model_20_cv2_conv_Conv_output_0", "_model_20_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_20_cv2_act_Mul_output_0[] = {1, 40, 40, 128}; Qnn_Tensor_t outputs__model_20_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001370172394672f, .offset= -2032}}}, .rank= 4, .dimensions=dimensions__model_20_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // 
Num Node Params inputs__model_20_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_20_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_Concat */ Qnn_Param_t params__model_20_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_20_Concat[] = { "_model_20_m_m_0_cv2_act_Mul_output_0", "_model_20_cv2_act_Mul_output_0" }; uint32_t dimensions__model_20_Concat_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_20_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001715130201774f, .offset= -1624}}}, .rank= 4, .dimensions=dimensions__model_20_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_20_Concat, // Node Params 1, // Num Node Params inputs__model_20_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_20_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_20_cv3_conv_weight[] = {1, 1, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_20_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 
0.0200817789882421f, .offset= -131}}}, .rank= 4, .dimensions=dimensions_model_20_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_cv3_conv_weight), .dataSize=BINLEN(model_20_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_20_cv3_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_20_cv3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_20_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000027572205f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_20_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_20_cv3_conv_bias), .dataSize=BINLEN(model_20_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_20_cv3_conv_Conv */ uint32_t dimensions___model_20_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_20_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_20_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_20_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_20_cv3_conv_Conv_stride[] = {2}; uint32_t __model_20_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_20_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv3_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_20_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_20_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_20_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_20_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_20_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_20_cv3_conv_Conv[] = { "_model_20_Concat_output_0", "model_20_cv3_conv_weight", "model_20_cv3_conv_bias" }; uint32_t dimensions__model_20_cv3_conv_Conv_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_20_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, 
QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009585621883161f, .offset= -27710}}}, .rank= 4, .dimensions=dimensions__model_20_cv3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_20_cv3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_20_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_20_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_cv3_act_Sigmoid */ const char* inputs__model_20_cv3_act_Sigmoid[] = { "_model_20_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_20_cv3_act_Sigmoid_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_20_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_20_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv3_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_20_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_20_cv3_act_Mul */ const char* inputs__model_20_cv3_act_Mul[] = { "_model_20_cv3_conv_Conv_output_0", 
"_model_20_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_20_cv3_act_Mul_output_0[] = {1, 40, 40, 256}; Qnn_Tensor_t outputs__model_20_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_20_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005575082032010f, .offset= -499}}}, .rank= 4, .dimensions=dimensions__model_20_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_20_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_20_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_20_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_21_conv_weight[] = {3, 3, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_21_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_21_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0021989804226905f, .offset= -111}}}, .rank= 4, .dimensions=dimensions_model_21_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_21_conv_weight), .dataSize=BINLEN(model_21_conv_weight)}}}}} ), err); uint32_t dimensions_model_21_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_21_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_21_conv_bias", 
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000008375995f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_21_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_21_conv_bias), .dataSize=BINLEN(model_21_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_21_conv_Conv */ uint32_t dimensions___model_21_conv_Conv_dilation[] = {2}; uint32_t __model_21_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_21_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_21_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_21_conv_Conv_stride[] = {2}; uint32_t __model_21_conv_Conv_stride[] = {2, 2}; Qnn_Param_t params__model_21_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_21_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_21_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_21_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_21_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, 
.dimensions=dimensions___model_21_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_21_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_21_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_21_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_21_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_21_conv_Conv[] = { "_model_20_cv3_act_Mul_output_0", "model_21_conv_weight", "model_21_conv_bias" }; uint32_t dimensions__model_21_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_21_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_21_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003379949193913f, .offset= -30754}}}, .rank= 4, .dimensions=dimensions__model_21_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_21_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_21_conv_Conv, // Node Params 4, // Num Node Params inputs__model_21_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names 
outputs__model_21_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_21_act_Sigmoid */ const char* inputs__model_21_act_Sigmoid[] = { "_model_21_conv_Conv_output_0" }; uint32_t dimensions__model_21_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_21_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_21_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_21_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_21_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_21_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_21_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_21_act_Mul */ const char* inputs__model_21_act_Mul[] = { "_model_21_conv_Conv_output_0", "_model_21_act_Sigmoid_output_0" }; uint32_t dimensions__model_21_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_21_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_21_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001836284645833f, .offset= -1516}}}, .rank= 4, .dimensions=dimensions__model_21_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, 
.dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_21_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_21_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_21_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_22_Concat */ Qnn_Param_t params__model_22_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_22_Concat[] = { "_model_21_act_Mul_output_0", "_model_10_act_Mul_output_0" }; uint32_t dimensions__model_22_Concat_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_22_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_22_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001836296578404f, .offset= -1516}}}, .rank= 4, .dimensions=dimensions__model_22_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_22_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_22_Concat, // Node Params 1, // Num Node Params inputs__model_22_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_22_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_23_cv1_conv_weight[] = {1, 1, 512, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, 
.dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0101558519527316f, .offset= -150}}}, .rank= 4, .dimensions=dimensions_model_23_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_cv1_conv_weight), .dataSize=BINLEN(model_23_cv1_conv_weight)}}}}} ), err); uint32_t dimensions_model_23_cv1_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000005051325f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_23_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_cv1_conv_bias), .dataSize=BINLEN(model_23_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_23_cv1_conv_Conv */ uint32_t dimensions___model_23_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_23_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_23_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_23_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_23_cv1_conv_Conv_stride[] = {2}; uint32_t __model_23_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_23_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, 
{.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_23_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_23_cv1_conv_Conv[] = { "_model_22_Concat_output_0", "model_23_cv1_conv_weight", "model_23_cv1_conv_bias" }; uint32_t dimensions__model_23_cv1_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv1_conv_Conv_output_0", 
.type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003467024362180f, .offset= -34150}}}, .rank= 4, .dimensions=dimensions__model_23_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_23_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_23_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_23_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_cv1_act_Sigmoid */ const char* inputs__model_23_cv1_act_Sigmoid[] = { "_model_23_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_23_cv1_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_23_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_23_cv1_act_Sigmoid, // Output Tensors 
1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_cv1_act_Mul */ const char* inputs__model_23_cv1_act_Mul[] = { "_model_23_cv1_conv_Conv_output_0", "_model_23_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_23_cv1_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001702841400402f, .offset= -1635}}}, .rank= 4, .dimensions=dimensions__model_23_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_23_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_23_m_0_cv1_conv_weight[] = {1, 1, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_m_0_cv1_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_m_0_cv1_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0100330747663975f, .offset= -168}}}, .rank= 4, .dimensions=dimensions_model_23_m_0_cv1_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_m_0_cv1_conv_weight), .dataSize=BINLEN(model_23_m_0_cv1_conv_weight)}}}}} ), err); uint32_t 
dimensions_model_23_m_0_cv1_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_m_0_cv1_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_m_0_cv1_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000007122232f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_23_m_0_cv1_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_m_0_cv1_conv_bias), .dataSize=BINLEN(model_23_m_0_cv1_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_23_m_m_0_cv1_conv_Conv */ uint32_t dimensions___model_23_m_m_0_cv1_conv_Conv_dilation[] = {2}; uint32_t __model_23_m_m_0_cv1_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_23_m_m_0_cv1_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_23_m_m_0_cv1_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_23_m_m_0_cv1_conv_Conv_stride[] = {2}; uint32_t __model_23_m_m_0_cv1_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_23_m_m_0_cv1_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_m_m_0_cv1_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_m_m_0_cv1_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_m_m_0_cv1_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, 
.name= "__model_23_m_m_0_cv1_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_23_m_m_0_cv1_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_m_m_0_cv1_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_m_m_0_cv1_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_m_m_0_cv1_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_m_m_0_cv1_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_23_m_m_0_cv1_conv_Conv[] = { "_model_23_cv1_act_Mul_output_0", "model_23_m_0_cv1_conv_weight", "model_23_m_0_cv1_conv_bias" }; uint32_t dimensions__model_23_m_m_0_cv1_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_m_m_0_cv1_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_m_m_0_cv1_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003987826348748f, .offset= -38635}}}, .rank= 4, 
.dimensions=dimensions__model_23_m_m_0_cv1_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_m_m_0_cv1_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_23_m_m_0_cv1_conv_Conv, // Node Params 4, // Num Node Params inputs__model_23_m_m_0_cv1_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_23_m_m_0_cv1_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_m_m_0_cv1_act_Sigmoid */ const char* inputs__model_23_m_m_0_cv1_act_Sigmoid[] = { "_model_23_m_m_0_cv1_conv_Conv_output_0" }; uint32_t dimensions__model_23_m_m_0_cv1_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_m_m_0_cv1_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_m_m_0_cv1_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_23_m_m_0_cv1_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_m_m_0_cv1_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_m_m_0_cv1_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_23_m_m_0_cv1_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_m_m_0_cv1_act_Mul */ const char* inputs__model_23_m_m_0_cv1_act_Mul[] = { "_model_23_m_m_0_cv1_conv_Conv_output_0", 
"_model_23_m_m_0_cv1_act_Sigmoid_output_0" }; uint32_t dimensions__model_23_m_m_0_cv1_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_m_m_0_cv1_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_m_m_0_cv1_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0001679359265836f, .offset= -1658}}}, .rank= 4, .dimensions=dimensions__model_23_m_m_0_cv1_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_m_m_0_cv1_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_m_m_0_cv1_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_23_m_m_0_cv1_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_23_m_0_cv2_conv_weight[] = {3, 3, 256, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_m_0_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_m_0_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0062467586249113f, .offset= -136}}}, .rank= 4, .dimensions=dimensions_model_23_m_0_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_m_0_cv2_conv_weight), .dataSize=BINLEN(model_23_m_0_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_23_m_0_cv2_conv_bias[] = {256}; 
VALIDATE(cutoff_yolov5s.addTensor("model_23_m_0_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_m_0_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000006880078f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_23_m_0_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_m_0_cv2_conv_bias), .dataSize=BINLEN(model_23_m_0_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_23_m_m_0_cv2_conv_Conv */ uint32_t dimensions___model_23_m_m_0_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_23_m_m_0_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_23_m_m_0_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_23_m_m_0_cv2_conv_Conv_pad_amount[] = {1, 1, 1, 1}; uint32_t dimensions___model_23_m_m_0_cv2_conv_Conv_stride[] = {2}; uint32_t __model_23_m_m_0_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_23_m_m_0_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_m_m_0_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_m_m_0_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_m_m_0_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_m_m_0_cv2_conv_Conv_pad_amount", 
.type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_23_m_m_0_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_m_m_0_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_m_m_0_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_m_m_0_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_m_m_0_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_23_m_m_0_cv2_conv_Conv[] = { "_model_23_m_m_0_cv1_act_Mul_output_0", "model_23_m_0_cv2_conv_weight", "model_23_m_0_cv2_conv_bias" }; uint32_t dimensions__model_23_m_m_0_cv2_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_m_m_0_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_m_m_0_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0005546461325139f, .offset= -39380}}}, .rank= 4, .dimensions=dimensions__model_23_m_m_0_cv2_conv_Conv_output_0, .memType= 
QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_m_m_0_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_23_m_m_0_cv2_conv_Conv, // Node Params 4, // Num Node Params inputs__model_23_m_m_0_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_23_m_m_0_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_m_m_0_cv2_act_Sigmoid */ const char* inputs__model_23_m_m_0_cv2_act_Sigmoid[] = { "_model_23_m_m_0_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_23_m_m_0_cv2_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_m_m_0_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_m_m_0_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_23_m_m_0_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_m_m_0_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_m_m_0_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_23_m_m_0_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_m_m_0_cv2_act_Mul */ const char* inputs__model_23_m_m_0_cv2_act_Mul[] = { "_model_23_m_m_0_cv2_conv_Conv_output_0", "_model_23_m_m_0_cv2_act_Sigmoid_output_0" }; uint32_t 
dimensions__model_23_m_m_0_cv2_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_m_m_0_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_m_m_0_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002256098669022f, .offset= -1234}}}, .rank= 4, .dimensions=dimensions__model_23_m_m_0_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_m_m_0_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_m_m_0_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_23_m_m_0_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_23_cv2_conv_weight[] = {1, 1, 512, 256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_cv2_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_cv2_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0062165814451873f, .offset= -153}}}, .rank= 4, .dimensions=dimensions_model_23_cv2_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_cv2_conv_weight), .dataSize=BINLEN(model_23_cv2_conv_weight)}}}}} ), err); uint32_t dimensions_model_23_cv2_conv_bias[] = {256}; VALIDATE(cutoff_yolov5s.addTensor("model_23_cv2_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= 
"model_23_cv2_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000005227985f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_23_cv2_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_cv2_conv_bias), .dataSize=BINLEN(model_23_cv2_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_23_cv2_conv_Conv */ uint32_t dimensions___model_23_cv2_conv_Conv_dilation[] = {2}; uint32_t __model_23_cv2_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_23_cv2_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_23_cv2_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_23_cv2_conv_Conv_stride[] = {2}; uint32_t __model_23_cv2_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_23_cv2_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv2_conv_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_cv2_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv2_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv2_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 
0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_23_cv2_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv2_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv2_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_cv2_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv2_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_23_cv2_conv_Conv[] = { "_model_22_Concat_output_0", "model_23_cv2_conv_weight", "model_23_cv2_conv_bias" }; uint32_t dimensions__model_23_cv2_conv_Conv_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_cv2_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv2_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002940779959317f, .offset= -34151}}}, .rank= 4, .dimensions=dimensions__model_23_cv2_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv2_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_23_cv2_conv_Conv, // Node Params 4, // Num 
Node Params inputs__model_23_cv2_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_23_cv2_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_cv2_act_Sigmoid */ const char* inputs__model_23_cv2_act_Sigmoid[] = { "_model_23_cv2_conv_Conv_output_0" }; uint32_t dimensions__model_23_cv2_act_Sigmoid_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_cv2_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv2_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_23_cv2_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv2_act_Sigmoid", // Node Name "qti.aisw", // Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_cv2_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_23_cv2_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_cv2_act_Mul */ const char* inputs__model_23_cv2_act_Mul[] = { "_model_23_cv2_conv_Conv_output_0", "_model_23_cv2_act_Sigmoid_output_0" }; uint32_t dimensions__model_23_cv2_act_Mul_output_0[] = {1, 20, 20, 256}; Qnn_Tensor_t outputs__model_23_cv2_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv2_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= 
{.scale= 0.0001450643758290f, .offset= -1920}}}, .rank= 4, .dimensions=dimensions__model_23_cv2_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv2_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_cv2_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_23_cv2_act_Mul, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_Concat */ Qnn_Param_t params__model_23_Concat[] = { {.paramType=QNN_PARAMTYPE_SCALAR, .name="axis", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}} }; const char* inputs__model_23_Concat[] = { "_model_23_m_m_0_cv2_act_Mul_output_0", "_model_23_cv2_act_Mul_output_0" }; uint32_t dimensions__model_23_Concat_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_23_Concat[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_Concat_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0002256117149955f, .offset= -1235}}}, .rank= 4, .dimensions=dimensions__model_23_Concat_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_Concat", // Node Name "qti.aisw", // Package Name "Concat", // Qnn Node Type params__model_23_Concat, // Node Params 1, // Num Node Params inputs__model_23_Concat, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_23_Concat, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_23_cv3_conv_weight[] = {1, 1, 512, 512}; 
VALIDATE(cutoff_yolov5s.addTensor("model_23_cv3_conv_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_cv3_conv_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0103659685701132f, .offset= -147}}}, .rank= 4, .dimensions=dimensions_model_23_cv3_conv_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_cv3_conv_weight), .dataSize=BINLEN(model_23_cv3_conv_weight)}}}}} ), err); uint32_t dimensions_model_23_cv3_conv_bias[] = {512}; VALIDATE(cutoff_yolov5s.addTensor("model_23_cv3_conv_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_23_cv3_conv_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000015874342f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_23_cv3_conv_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_23_cv3_conv_bias), .dataSize=BINLEN(model_23_cv3_conv_bias)}}}}} ), err); /* ADDING NODE FOR _model_23_cv3_conv_Conv */ uint32_t dimensions___model_23_cv3_conv_Conv_dilation[] = {2}; uint32_t __model_23_cv3_conv_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_23_cv3_conv_Conv_pad_amount[] = {2, 2}; uint32_t __model_23_cv3_conv_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_23_cv3_conv_Conv_stride[] = {2}; uint32_t __model_23_cv3_conv_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_23_cv3_conv_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv3_conv_Conv_dilation", .type= 
QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_cv3_conv_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv3_conv_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv3_conv_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_23_cv3_conv_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv3_conv_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_23_cv3_conv_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_23_cv3_conv_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_23_cv3_conv_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_23_cv3_conv_Conv[] = { "_model_23_Concat_output_0", "model_23_cv3_conv_weight", "model_23_cv3_conv_bias" }; uint32_t 
dimensions__model_23_cv3_conv_Conv_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_23_cv3_conv_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv3_conv_Conv_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0009371771593578f, .offset= -37300}}}, .rank= 4, .dimensions=dimensions__model_23_cv3_conv_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv3_conv_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_23_cv3_conv_Conv, // Node Params 4, // Num Node Params inputs__model_23_cv3_conv_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_23_cv3_conv_Conv, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_cv3_act_Sigmoid */ const char* inputs__model_23_cv3_act_Sigmoid[] = { "_model_23_cv3_conv_Conv_output_0" }; uint32_t dimensions__model_23_cv3_act_Sigmoid_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_23_cv3_act_Sigmoid[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv3_act_Sigmoid_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000152587890625f, .offset= 0}}}, .rank= 4, .dimensions=dimensions__model_23_cv3_act_Sigmoid_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv3_act_Sigmoid", // Node Name "qti.aisw", // 
Package Name "Sigmoid", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_cv3_act_Sigmoid, // Input Tensor Names 1, // Num Input Tensor Names outputs__model_23_cv3_act_Sigmoid, // Output Tensors 1// Num Output Tensors ), err); /* ADDING NODE FOR _model_23_cv3_act_Mul */ const char* inputs__model_23_cv3_act_Mul[] = { "_model_23_cv3_conv_Conv_output_0", "_model_23_cv3_act_Sigmoid_output_0" }; uint32_t dimensions__model_23_cv3_act_Mul_output_0[] = {1, 20, 20, 512}; Qnn_Tensor_t outputs__model_23_cv3_act_Mul[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_23_cv3_act_Mul_output_0", .type= QNN_TENSOR_TYPE_NATIVE, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004080178914592f, .offset= -682}}}, .rank= 4, .dimensions=dimensions__model_23_cv3_act_Mul_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_23_cv3_act_Mul", // Node Name "qti.aisw", // Package Name "ElementWiseMultiply", // Qnn Node Type nullptr, // Node Params 0, // Num Node Params inputs__model_23_cv3_act_Mul, // Input Tensor Names 2, // Num Input Tensor Names outputs__model_23_cv3_act_Mul, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_24_m_0_weight[] = {1, 1, 128, 255}; VALIDATE(cutoff_yolov5s.addTensor("model_24_m_0_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_24_m_0_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0041475184261799f, .offset= -128}}}, .rank= 4, 
.dimensions=dimensions_model_24_m_0_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_24_m_0_weight), .dataSize=BINLEN(model_24_m_0_weight)}}}}} ), err); uint32_t dimensions_model_24_m_0_bias[] = {255}; VALIDATE(cutoff_yolov5s.addTensor("model_24_m_0_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_24_m_0_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000031832315f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_24_m_0_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_24_m_0_bias), .dataSize=BINLEN(model_24_m_0_bias)}}}}} ), err); /* ADDING NODE FOR _model_24_m_0_Conv */ uint32_t dimensions___model_24_m_0_Conv_dilation[] = {2}; uint32_t __model_24_m_0_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_24_m_0_Conv_pad_amount[] = {2, 2}; uint32_t __model_24_m_0_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_24_m_0_Conv_stride[] = {2}; uint32_t __model_24_m_0_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_24_m_0_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_0_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_24_m_0_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_0_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, 
{.v1= { .id=0, .name= "__model_24_m_0_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_24_m_0_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_0_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_0_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_24_m_0_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_0_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_24_m_0_Conv[] = { "_model_17_cv3_act_Mul_output_0", "model_24_m_0_weight", "model_24_m_0_bias" }; uint32_t dimensions__model_24_m_0_Conv_output_0[] = {1, 80, 80, 255}; Qnn_Tensor_t outputs__model_24_m_0_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_24_m_0_Conv_output_0", .type= QNN_TENSOR_TYPE_APP_READ, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0004625362344086f, .offset= -47188}}}, .rank= 4, .dimensions=dimensions__model_24_m_0_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; 
VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_24_m_0_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_24_m_0_Conv, // Node Params 4, // Num Node Params inputs__model_24_m_0_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_24_m_0_Conv, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_24_m_1_weight[] = {1, 1, 256, 255}; VALIDATE(cutoff_yolov5s.addTensor("model_24_m_1_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_24_m_1_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0040699676610529f, .offset= -134}}}, .rank= 4, .dimensions=dimensions_model_24_m_1_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_24_m_1_weight), .dataSize=BINLEN(model_24_m_1_weight)}}}}} ), err); uint32_t dimensions_model_24_m_1_bias[] = {255}; VALIDATE(cutoff_yolov5s.addTensor("model_24_m_1_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_24_m_1_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000034560799f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_24_m_1_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_24_m_1_bias), .dataSize=BINLEN(model_24_m_1_bias)}}}}} ), err); /* ADDING NODE FOR _model_24_m_1_Conv */ uint32_t dimensions___model_24_m_1_Conv_dilation[] = {2}; uint32_t __model_24_m_1_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_24_m_1_Conv_pad_amount[] = {2, 2}; uint32_t 
__model_24_m_1_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_24_m_1_Conv_stride[] = {2}; uint32_t __model_24_m_1_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_24_m_1_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_1_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_24_m_1_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_1_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_1_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_24_m_1_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_1_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_1_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_24_m_1_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { 
.data=(uint8_t*)__model_24_m_1_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_24_m_1_Conv[] = { "_model_20_cv3_act_Mul_output_0", "model_24_m_1_weight", "model_24_m_1_bias" }; uint32_t dimensions__model_24_m_1_Conv_output_0[] = {1, 40, 40, 255}; Qnn_Tensor_t outputs__model_24_m_1_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_24_m_1_Conv_output_0", .type= QNN_TENSOR_TYPE_APP_READ, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003991636040155f, .offset= -44005}}}, .rank= 4, .dimensions=dimensions__model_24_m_1_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_24_m_1_Conv", // Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_24_m_1_Conv, // Node Params 4, // Num Node Params inputs__model_24_m_1_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_24_m_1_Conv, // Output Tensors 1// Num Output Tensors ), err); uint32_t dimensions_model_24_m_2_weight[] = {1, 1, 512, 255}; VALIDATE(cutoff_yolov5s.addTensor("model_24_m_2_weight", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_24_m_2_weight", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_8, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0029995788354427f, .offset= -125}}}, .rank= 4, .dimensions=dimensions_model_24_m_2_weight, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_24_m_2_weight), 
.dataSize=BINLEN(model_24_m_2_weight)}}}}} ), err); uint32_t dimensions_model_24_m_2_bias[] = {255}; VALIDATE(cutoff_yolov5s.addTensor("model_24_m_2_bias", // Node Name (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "model_24_m_2_bias", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_SFIXED_POINT_32, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0000000037871359f, .offset= 0}}}, .rank= 1, .dimensions=dimensions_model_24_m_2_bias, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=BINVARSTART(model_24_m_2_bias), .dataSize=BINLEN(model_24_m_2_bias)}}}}} ), err); /* ADDING NODE FOR _model_24_m_2_Conv */ uint32_t dimensions___model_24_m_2_Conv_dilation[] = {2}; uint32_t __model_24_m_2_Conv_dilation[] = {1, 1}; uint32_t dimensions___model_24_m_2_Conv_pad_amount[] = {2, 2}; uint32_t __model_24_m_2_Conv_pad_amount[] = {0, 0, 0, 0}; uint32_t dimensions___model_24_m_2_Conv_stride[] = {2}; uint32_t __model_24_m_2_Conv_stride[] = {1, 1}; Qnn_Param_t params__model_24_m_2_Conv[] = { {.paramType=QNN_PARAMTYPE_TENSOR, .name="dilation", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_2_Conv_dilation", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_24_m_2_Conv_dilation, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_2_Conv_dilation, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="pad_amount", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_2_Conv_pad_amount", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= 
QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 2, .dimensions=dimensions___model_24_m_2_Conv_pad_amount, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_2_Conv_pad_amount, .dataSize=16}}}}}}}, {.paramType=QNN_PARAMTYPE_TENSOR, .name="stride", {.tensorParam=(Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "__model_24_m_2_Conv_stride", .type= QNN_TENSOR_TYPE_STATIC, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UINT_32, .quantizeParams= { QNN_DEFINITION_UNDEFINED, QNN_QUANTIZATION_ENCODING_UNDEFINED, {.scaleOffsetEncoding= {.scale= 0.0000000000000000f, .offset= 0}}}, .rank= 1, .dimensions=dimensions___model_24_m_2_Conv_stride, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=(uint8_t*)__model_24_m_2_Conv_stride, .dataSize=8}}}}}}}, {.paramType=QNN_PARAMTYPE_SCALAR, .name="group", {.scalarParam= (Qnn_Scalar_t) {QNN_DATATYPE_UINT_32, {.uint32Value = 1}}}} }; const char* inputs__model_24_m_2_Conv[] = { "_model_23_cv3_act_Mul_output_0", "model_24_m_2_weight", "model_24_m_2_bias" }; uint32_t dimensions__model_24_m_2_Conv_output_0[] = {1, 20, 20, 255}; Qnn_Tensor_t outputs__model_24_m_2_Conv[] = { (Qnn_Tensor_t) { .version= QNN_TENSOR_VERSION_1, {.v1= { .id=0, .name= "_model_24_m_2_Conv_output_0", .type= QNN_TENSOR_TYPE_APP_READ, .dataFormat= QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, .dataType= QNN_DATATYPE_UFIXED_POINT_16, .quantizeParams= { QNN_DEFINITION_DEFINED, QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, {.scaleOffsetEncoding= {.scale= 0.0003534925053827f, .offset= -43837}}}, .rank= 4, .dimensions=dimensions__model_24_m_2_Conv_output_0, .memType= QNN_TENSORMEMTYPE_RAW, {.clientBuf= { .data=nullptr, .dataSize=0}}}}} }; VALIDATE(cutoff_yolov5s.addNode(QNN_OPCONFIG_VERSION_1, // Op_Config_t Version "_model_24_m_2_Conv", // 
Node Name "qti.aisw", // Package Name "Conv2d", // Qnn Node Type params__model_24_m_2_Conv, // Node Params 4, // Num Node Params inputs__model_24_m_2_Conv, // Input Tensor Names 3, // Num Input Tensor Names outputs__model_24_m_2_Conv, // Output Tensors 1// Num Output Tensors ), err); // Add all models to array to get graphsInfo QnnModel* models [] = {&cutoff_yolov5s}; uint32_t numModels = 1; // Populate the constructed graphs in provided output variables VALIDATE(getGraphInfoFromModels(*models, numModels, graphsInfo), err); *numGraphsInfo = numModels; return err; } // PREPARE_GRAPHS QNN_API ModelError_t QnnModel_freeGraphsInfo(GraphInfoPtr_t** graphsInfo, uint32_t numGraphsInfo){ return qnn_wrapper_api::freeGraphsInfo(graphsInfo, numGraphsInfo); } // FREEGRAPHINFO }