| // Copyright 2022 Google LLC |
| // |
| // This source code is licensed under the BSD-style license found in the |
| // LICENSE file in the root directory of this source tree. |
| |
| #include <algorithm> |
| #include <array> |
| #include <cmath> |
| #include <cstddef> |
| #include <cstdint> |
| #include <limits> |
| #include <memory> |
| #include <random> |
| |
| #include <xnnpack.h> |
| #include <xnnpack/node-type.h> |
| #include <xnnpack/operator.h> |
| #include <xnnpack/subgraph.h> |
| |
| #include "subgraph-unary-tester.h" |
| #include <gtest/gtest.h> |
| |
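| // Aliases for the generic unary subgraph tester: the first template argument is the input |
| // element type, the second the output element type. |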
| using ConvertTestF32ToQS8 = UnaryTest<float, int8_t>; |
| using ConvertTestF32ToQU8 = UnaryTest<float, uint8_t>; |
| using ConvertTestQS8ToQS8 = UnaryTest<int8_t, int8_t>; |
| using ConvertTestQS8ToF32 = UnaryTest<int8_t, float>; |
| using ConvertTestQU8ToQU8 = UnaryTest<uint8_t, uint8_t>; |
| using ConvertTestQU8ToF32 = UnaryTest<uint8_t, float>; |
| |
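| // Each "define" test builds a one-node subgraph with xnn_define_convert and verifies that |
| // the Convert node is recorded with the expected compute type, input, output, and flags. |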
| TEST_F(ConvertTestF32ToQS8, define) |
| { |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, signed_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| |
| ASSERT_EQ(subgraph->num_nodes, 1); |
| const struct xnn_node* node = &subgraph->nodes[0]; |
| ASSERT_EQ(node->type, xnn_node_type_convert); |
| ASSERT_EQ(node->compute_type, xnn_compute_type_fp32_to_qs8); |
| ASSERT_EQ(node->num_inputs, 1); |
| ASSERT_EQ(node->inputs[0], input_id); |
| ASSERT_EQ(node->num_outputs, 1); |
| ASSERT_EQ(node->outputs[0], output_id); |
| ASSERT_EQ(node->flags, 0); |
| } |
| |
| TEST_F(ConvertTestF32ToQU8, define) |
| { |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, unsigned_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| |
| ASSERT_EQ(subgraph->num_nodes, 1); |
| const struct xnn_node* node = &subgraph->nodes[0]; |
| ASSERT_EQ(node->type, xnn_node_type_convert); |
| ASSERT_EQ(node->compute_type, xnn_compute_type_fp32_to_qu8); |
| ASSERT_EQ(node->num_inputs, 1); |
| ASSERT_EQ(node->inputs[0], input_id); |
| ASSERT_EQ(node->num_outputs, 1); |
| ASSERT_EQ(node->outputs[0], output_id); |
| ASSERT_EQ(node->flags, 0); |
| } |
| |
| TEST_F(ConvertTestQS8ToF32, define) |
| { |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, signed_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| |
| ASSERT_EQ(subgraph->num_nodes, 1); |
| const struct xnn_node* node = &subgraph->nodes[0]; |
| ASSERT_EQ(node->type, xnn_node_type_convert); |
| ASSERT_EQ(node->compute_type, xnn_compute_type_qs8_to_fp32); |
| ASSERT_EQ(node->num_inputs, 1); |
| ASSERT_EQ(node->inputs[0], input_id); |
| ASSERT_EQ(node->num_outputs, 1); |
| ASSERT_EQ(node->outputs[0], output_id); |
| ASSERT_EQ(node->flags, 0); |
| } |
| |
| TEST_F(ConvertTestQS8ToQS8, define) |
| { |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| const int32_t input_zero_point = i8dist(rng); |
| const int32_t output_zero_point = i8dist(rng); |
| // Scales are drawn from [2**-3.5, 2**3.5], which guarantees 2**-8 <= input_scale / output_scale <= 2**7. |
| const float input_scale = std::uniform_real_distribution<float>(0.0883883f, 11.3137f)(rng); |
| const float output_scale = std::uniform_real_distribution<float>(0.0883883f, 11.3137f)(rng); |
| |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, input_zero_point, input_scale, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, output_zero_point, output_scale, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| |
| ASSERT_EQ(subgraph->num_nodes, 1); |
| const struct xnn_node* node = &subgraph->nodes[0]; |
| ASSERT_EQ(node->type, xnn_node_type_convert); |
| ASSERT_EQ(node->compute_type, xnn_compute_type_qs8); |
| ASSERT_EQ(node->num_inputs, 1); |
| ASSERT_EQ(node->inputs[0], input_id); |
| ASSERT_EQ(node->num_outputs, 1); |
| ASSERT_EQ(node->outputs[0], output_id); |
| ASSERT_EQ(node->flags, 0); |
| } |
| |
| TEST_F(ConvertTestQU8ToF32, define) |
| { |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, unsigned_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| |
| ASSERT_EQ(subgraph->num_nodes, 1); |
| const struct xnn_node* node = &subgraph->nodes[0]; |
| ASSERT_EQ(node->type, xnn_node_type_convert); |
| ASSERT_EQ(node->compute_type, xnn_compute_type_qu8_to_fp32); |
| ASSERT_EQ(node->num_inputs, 1); |
| ASSERT_EQ(node->inputs[0], input_id); |
| ASSERT_EQ(node->num_outputs, 1); |
| ASSERT_EQ(node->outputs[0], output_id); |
| ASSERT_EQ(node->flags, 0); |
| } |
| |
| TEST_F(ConvertTestQU8ToQU8, define) |
| { |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| const int32_t input_zero_point = u8dist(rng); |
| const int32_t output_zero_point = u8dist(rng); |
| // Scales are drawn from [2**-3.5, 2**3.5], which guarantees 2**-8 <= input_scale / output_scale <= 2**7. |
| const float input_scale = std::uniform_real_distribution<float>(0.0883883f, 11.3137f)(rng); |
| const float output_scale = std::uniform_real_distribution<float>(0.0883883f, 11.3137f)(rng); |
| |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, input_zero_point, input_scale, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, output_zero_point, output_scale, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| |
| ASSERT_EQ(subgraph->num_nodes, 1); |
| const struct xnn_node* node = &subgraph->nodes[0]; |
| ASSERT_EQ(node->type, xnn_node_type_convert); |
| ASSERT_EQ(node->compute_type, xnn_compute_type_qu8); |
| ASSERT_EQ(node->num_inputs, 1); |
| ASSERT_EQ(node->inputs[0], input_id); |
| ASSERT_EQ(node->num_outputs, 1); |
| ASSERT_EQ(node->outputs[0], output_id); |
| ASSERT_EQ(node->flags, 0); |
| } |
| |
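| // Each "matches_operator_api" test runs the same conversion through both the operator API |
| // and the subgraph API on identical inputs and verifies that the outputs are bit-identical. |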
| TEST_F(ConvertTestF32ToQS8, matches_operator_api) |
| { |
| std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); |
| std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); |
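| // Prefill both outputs with a sentinel so stale values are detected if either API fails to write. |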
| std::fill(operator_output.begin(), operator_output.end(), INT8_C(0xA5)); |
| std::fill(subgraph_output.begin(), subgraph_output.end(), INT8_C(0xA5)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| // Call operator API. |
| xnn_operator_t op = nullptr; |
| const xnn_status status = xnn_create_convert_nc_f32_qs8( |
| channels, channels, channels, scale, signed_zero_point, INT8_MIN, INT8_MAX, /*flags=*/0, &op); |
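| // The convert operator may be unsupported on this hardware; skip the test instead of failing. |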
| if (status == xnn_status_unsupported_hardware) { |
| GTEST_SKIP(); |
| } |
| |
| ASSERT_EQ(xnn_status_success, status); |
| ASSERT_NE(nullptr, op); |
| std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator); |
| |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_setup_convert_nc_f32_qs8(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr)); |
| |
| // Call subgraph API. |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, signed_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| xnn_runtime_t runtime = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime)); |
| ASSERT_NE(nullptr, runtime); |
| std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime); |
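| // Bind the external input and output buffers, then run the subgraph. |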
| std::array<xnn_external_value, 2> external = { |
| xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}}; |
| ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data())); |
| ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime)); |
| |
| ASSERT_EQ(subgraph_output, operator_output); |
| } |
| |
| TEST_F(ConvertTestF32ToQU8, matches_operator_api) |
| { |
| std::uniform_real_distribution<float> f32dist(-1.0f, 1.0f); |
| std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); }); |
| std::fill(operator_output.begin(), operator_output.end(), UINT8_C(0xA5)); |
| std::fill(subgraph_output.begin(), subgraph_output.end(), UINT8_C(0xA5)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| // Call operator API. |
| xnn_operator_t op = nullptr; |
| const xnn_status status = xnn_create_convert_nc_f32_qu8( |
| channels, channels, channels, scale, unsigned_zero_point, 0, UINT8_MAX, /*flags=*/0, &op); |
| if (status == xnn_status_unsupported_hardware) { |
| GTEST_SKIP(); |
| } |
| |
| ASSERT_EQ(xnn_status_success, status); |
| ASSERT_NE(nullptr, op); |
| std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator); |
| |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_setup_convert_nc_f32_qu8(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr)); |
| |
| // Call subgraph API. |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, unsigned_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| xnn_runtime_t runtime = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime)); |
| ASSERT_NE(nullptr, runtime); |
| std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime); |
| std::array<xnn_external_value, 2> external = { |
| xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}}; |
| ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data())); |
| ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime)); |
| |
| ASSERT_EQ(subgraph_output, operator_output); |
| } |
| |
| TEST_F(ConvertTestQS8ToF32, matches_operator_api) |
| { |
| std::generate(input.begin(), input.end(), [&]() { return i8dist(rng); }); |
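| // Prefill the float outputs with NaN: NaN compares unequal to everything (including itself), |
| // so the final equality check fails unless both APIs overwrite every element. |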
| std::fill(operator_output.begin(), operator_output.end(), std::nanf("")); |
| std::fill(subgraph_output.begin(), subgraph_output.end(), std::nanf("")); |
| |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| // Call operator API. |
| xnn_operator_t op = nullptr; |
| const xnn_status status = |
| xnn_create_convert_nc_qs8_f32(channels, channels, channels, scale, signed_zero_point, /*flags=*/0, &op); |
| if (status == xnn_status_unsupported_hardware) { |
| GTEST_SKIP(); |
| } |
| |
| ASSERT_EQ(xnn_status_success, status); |
| ASSERT_NE(nullptr, op); |
| std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator); |
| |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_setup_convert_nc_qs8_f32(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr)); |
| |
| // Call subgraph API. |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, signed_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| xnn_runtime_t runtime = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime)); |
| ASSERT_NE(nullptr, runtime); |
| std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime); |
| std::array<xnn_external_value, 2> external = { |
| xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}}; |
| ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data())); |
| ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime)); |
| |
| ASSERT_EQ(subgraph_output, operator_output); |
| } |
| |
| TEST_F(ConvertTestQS8ToQS8, matches_operator_api) |
| { |
| const int8_t input_zero_point = i8dist(rng); |
| const int8_t output_zero_point = i8dist(rng); |
| const float input_scale = std::uniform_real_distribution<float>(0.25f, 4.0f)(rng); |
| const float output_scale = std::uniform_real_distribution<float>(0.25f, 4.0f)(rng); |
| |
| std::generate(input.begin(), input.end(), [&]() { return i8dist(rng); }); |
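| // Use different sentinels for the two outputs so the test cannot pass if neither API writes its output. |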
| std::fill(operator_output.begin(), operator_output.end(), INT8_C(0xA5)); |
| std::fill(subgraph_output.begin(), subgraph_output.end(), INT8_C(0x5A)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| // Call operator API. |
| xnn_operator_t op = nullptr; |
| const xnn_status status = |
| xnn_create_convert_nc_qs8( |
| channels, channels, channels, input_scale, input_zero_point, output_scale, output_zero_point, /*flags=*/0, &op); |
| if (status == xnn_status_unsupported_hardware) { |
| GTEST_SKIP(); |
| } |
| |
| ASSERT_EQ(xnn_status_success, status); |
| ASSERT_NE(nullptr, op); |
| std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator); |
| |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_setup_convert_nc_qs8(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr)); |
| |
| // Call subgraph API. |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, input_zero_point, input_scale, |
| dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_qint8, output_zero_point, output_scale, |
| dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| xnn_runtime_t runtime = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime)); |
| ASSERT_NE(nullptr, runtime); |
| std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime); |
| const std::array<xnn_external_value, 2> external = { |
| xnn_external_value{input_id, input.data()}, |
| xnn_external_value{output_id, subgraph_output.data()} |
| }; |
| ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data())); |
| ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime)); |
| |
| ASSERT_EQ(subgraph_output, operator_output); |
| } |
| |
| TEST_F(ConvertTestQU8ToF32, matches_operator_api) |
| { |
| std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); |
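| // As above, the NaN prefill makes the final equality check fail on any unwritten element. |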
| std::fill(operator_output.begin(), operator_output.end(), std::nanf("")); |
| std::fill(subgraph_output.begin(), subgraph_output.end(), std::nanf("")); |
| |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| // Call operator API. |
| xnn_operator_t op = nullptr; |
| const xnn_status status = |
| xnn_create_convert_nc_qu8_f32(channels, channels, channels, scale, unsigned_zero_point, /*flags=*/0, &op); |
| if (status == xnn_status_unsupported_hardware) { |
| GTEST_SKIP(); |
| } |
| |
| ASSERT_EQ(xnn_status_success, status); |
| ASSERT_NE(nullptr, op); |
| std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator); |
| |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_setup_convert_nc_qu8_f32(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr)); |
| |
| // Call subgraph API. |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, unsigned_zero_point, scale, dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_tensor_value( |
| subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| xnn_runtime_t runtime = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime)); |
| ASSERT_NE(nullptr, runtime); |
| std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime); |
| std::array<xnn_external_value, 2> external = { |
| xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}}; |
| ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data())); |
| ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime)); |
| |
| ASSERT_EQ(subgraph_output, operator_output); |
| } |
| |
| TEST_F(ConvertTestQU8ToQU8, matches_operator_api) |
| { |
| const uint8_t input_zero_point = u8dist(rng); |
| const uint8_t output_zero_point = u8dist(rng); |
| const float input_scale = std::uniform_real_distribution<float>(0.25f, 4.0f)(rng); |
| const float output_scale = std::uniform_real_distribution<float>(0.25f, 4.0f)(rng); |
| |
| std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); }); |
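| // Different sentinels again, so the test cannot pass if neither API writes its output. |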
| std::fill(operator_output.begin(), operator_output.end(), UINT8_C(0xA5)); |
| std::fill(subgraph_output.begin(), subgraph_output.end(), UINT8_C(0x5A)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr)); |
| |
| // Call operator API. |
| xnn_operator_t op = nullptr; |
| const xnn_status status = |
| xnn_create_convert_nc_qu8( |
| channels, channels, channels, input_scale, input_zero_point, output_scale, output_zero_point, /*flags=*/0, &op); |
| if (status == xnn_status_unsupported_hardware) { |
| GTEST_SKIP(); |
| } |
| |
| ASSERT_EQ(xnn_status_success, status); |
| ASSERT_NE(nullptr, op); |
| std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator); |
| |
| ASSERT_EQ( |
| xnn_status_success, |
| xnn_setup_convert_nc_qu8(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr)); |
| |
| ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr)); |
| |
| // Call subgraph API. |
| xnn_subgraph_t subgraph = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph)); |
| std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph); |
| |
| input_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, input_zero_point, input_scale, |
| dims.size(), dims.data(), nullptr, /*external_id=*/0, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id)); |
| ASSERT_NE(input_id, XNN_INVALID_NODE_ID); |
| |
| output_id = XNN_INVALID_NODE_ID; |
| ASSERT_EQ( |
| xnn_status_success, xnn_define_quantized_tensor_value( |
| subgraph, xnn_datatype_quint8, output_zero_point, output_scale, |
| dims.size(), dims.data(), nullptr, /*external_id=*/1, |
| /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id)); |
| ASSERT_NE(output_id, XNN_INVALID_NODE_ID); |
| |
| xnn_runtime_t runtime = nullptr; |
| ASSERT_EQ(xnn_status_success, xnn_define_convert(subgraph, input_id, output_id, /*flags=*/0)); |
| ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime)); |
| ASSERT_NE(nullptr, runtime); |
| std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime); |
| const std::array<xnn_external_value, 2> external = { |
| xnn_external_value{input_id, input.data()}, |
| xnn_external_value{output_id, subgraph_output.data()} |
| }; |
| ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data())); |
| ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime)); |
| |
| ASSERT_EQ(subgraph_output, operator_output); |
| } |