Add REDUCE_* ops

Adds validation, output-shape preparation, and reference execution
(delegating to the TF Lite ReduceGeneric kernel) for six new reduction
operations, along with their NeuralNetworks.h API documentation and
validation, CTS, and VTS test hookup.

Operations added:
- REDUCE_PROD
- REDUCE_SUM
- REDUCE_MAX
- REDUCE_MIN
- REDUCE_ANY
- REDUCE_ALL

Fix: 113564646
Test: NeuralNetworksTest_static_asan
Change-Id: I3a03358c941b0d0b324406b4f43a8bde97dfa505
Merged-In: I3a03358c941b0d0b324406b4f43a8bde97dfa505
(cherry picked from commit 88fe2437673dab80cef6bccb60f23e02ddbdb15f)
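For context, a minimal client-side sketch (hypothetical, not part of this
change) of how one of the new operations is wired up through the public NNAPI
C API. Each REDUCE_* op takes three inputs (data tensor, axes tensor,
keep_dims scalar) and produces one output, mirroring kNumInputs/kNumOutputs in
Reduce.cpp below; the 2x3 shape, operand indices, and axis choice here are
illustrative only:

    #include <android/NeuralNetworks.h>

    // Build a model computing REDUCE_SUM over axis 1 of a 2x3 tensor.
    ANeuralNetworksModel* model = nullptr;
    ANeuralNetworksModel_create(&model);

    uint32_t inDims[2] = {2, 3};
    ANeuralNetworksOperandType in = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                     .dimensionCount = 2, .dimensions = inDims};
    uint32_t axesDims[1] = {1};
    ANeuralNetworksOperandType axes = {.type = ANEURALNETWORKS_TENSOR_INT32,
                                       .dimensionCount = 1, .dimensions = axesDims};
    ANeuralNetworksOperandType keep = {.type = ANEURALNETWORKS_BOOL};
    uint32_t outDims[1] = {2};
    ANeuralNetworksOperandType out = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                      .dimensionCount = 1, .dimensions = outDims};

    ANeuralNetworksModel_addOperand(model, &in);    // operand 0: input tensor
    ANeuralNetworksModel_addOperand(model, &axes);  // operand 1: axes to reduce
    ANeuralNetworksModel_addOperand(model, &keep);  // operand 2: keep_dims
    ANeuralNetworksModel_addOperand(model, &out);   // operand 3: output tensor

    const int32_t axesData[1] = {1};
    const bool keepDims = false;
    ANeuralNetworksModel_setOperandValue(model, 1, axesData, sizeof(axesData));
    ANeuralNetworksModel_setOperandValue(model, 2, &keepDims, sizeof(keepDims));

    const uint32_t opInputs[3] = {0, 1, 2};
    const uint32_t opOutputs[1] = {3};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_REDUCE_SUM,
                                      3, opInputs, 1, opOutputs);

    const uint32_t modelInputs[1] = {0};  // axes and keep_dims are constants
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, modelInputs, 1, opOutputs);
    ANeuralNetworksModel_finish(model);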
diff --git a/common/Android.bp b/common/Android.bp
index 2ed8928..c527539 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -33,6 +33,7 @@
         "operations/LogicalNot.cpp",
         "operations/LogSoftmax.cpp",
         "operations/PRelu.cpp",
+        "operations/Reduce.cpp",
     ],
 }
 
diff --git a/common/OperationResolver.cpp b/common/OperationResolver.cpp
index e49d276..292c003 100644
--- a/common/OperationResolver.cpp
+++ b/common/OperationResolver.cpp
@@ -39,6 +39,12 @@
 const OperationRegistration* register_LOG_SOFTMAX();
 const OperationRegistration* register_NOT_EQUAL();
 const OperationRegistration* register_PRELU();
+const OperationRegistration* register_REDUCE_ALL();
+const OperationRegistration* register_REDUCE_ANY();
+const OperationRegistration* register_REDUCE_MAX();
+const OperationRegistration* register_REDUCE_MIN();
+const OperationRegistration* register_REDUCE_PROD();
+const OperationRegistration* register_REDUCE_SUM();
 
 OperationResolver::OperationResolver() {
     registerOperation(register_ABS());
@@ -56,6 +62,12 @@
     registerOperation(register_LOG_SOFTMAX());
     registerOperation(register_NOT_EQUAL());
     registerOperation(register_PRELU());
+    registerOperation(register_REDUCE_ALL());
+    registerOperation(register_REDUCE_ANY());
+    registerOperation(register_REDUCE_MAX());
+    registerOperation(register_REDUCE_MIN());
+    registerOperation(register_REDUCE_PROD());
+    registerOperation(register_REDUCE_SUM());
 }
 
 const OperationRegistration* OperationResolver::findOperation(OperationType operationType) const {
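A simplified sketch of the pattern these declarations follow (hypothetical
field names, not the actual NN_REGISTER_OPERATION expansion): each
operations/*.cpp file exports a register_<OP>() factory returning a static
OperationRegistration bundling the op's callbacks, and the resolver stores
them for lookup by OperationType:

    // Hypothetical, simplified shape of an OperationRegistration entry;
    // the real struct lives in OperationResolver.h.
    struct OperationRegistration {
        OperationType type;  // e.g. OperationType::REDUCE_SUM
        const char* name;    // e.g. "REDUCE_SUM"
        bool (*validate)(const IOperationValidationContext*);
        bool (*prepare)(IOperationExecutionContext*);
        bool (*execute)(IOperationExecutionContext*);
    };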
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 9ab8b30..0a1ddaf 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -279,7 +279,7 @@
         "QUANTIZE",
         "QUANTIZED_16BIT_LSTM",
         "RANDOM_MULTINOMIAL",
-        "REDUCE",
+        "REDUCE_PROD",
         "ROI_ALIGN",
         "RSQRT",
         "SELECT",
@@ -298,6 +298,11 @@
         "ROI_POOLING",
         "EQUAL",
         "NOT_EQUAL",
+        "REDUCE_SUM",
+        "REDUCE_MAX",
+        "REDUCE_MIN",
+        "REDUCE_ANY",
+        "REDUCE_ALL",
 };
 
 static_assert(COUNT(kOperationNames) == kNumberOfOperationTypes, "kOperationNames is incorrect");
diff --git a/common/include/Utils.h b/common/include/Utils.h
index 45f6b88..d15d9c9 100644
--- a/common/include/Utils.h
+++ b/common/include/Utils.h
@@ -31,7 +31,7 @@
 const int kNumberOfDataTypes = 11;
 
 // The number of operation types (OperationCode) defined in NeuralNetworks.h.
-const int kNumberOfOperationTypes = 92;
+const int kNumberOfOperationTypes = 97;
 
 // The number of execution preferences defined in NeuralNetworks.h.
 const int kNumberOfPreferences = 3;
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp
new file mode 100644
index 0000000..6b1d413
--- /dev/null
+++ b/common/operations/Reduce.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Operations"
+
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+
+#include "HalInterfaces.h"
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Tracing.h"
+
+namespace android {
+namespace nn {
+namespace reduce {
+
+typedef uint8_t bool8;  // TODO: Where to define this?
+
+constexpr uint32_t kNumInputs = 3;
+constexpr uint32_t kInputTensor = 0;
+constexpr uint32_t kInputAxes = 1;
+constexpr uint32_t kInputKeepDims = 2;
+
+constexpr uint32_t kNumOutputs = 1;
+constexpr uint32_t kOutputTensor = 0;
+
+// Values from
+// https://en.wikipedia.org/wiki/Half-precision_floating-point_format#IEEE_754_half-precision_binary_floating-point_format:_binary16
+constexpr _Float16 kFloat16Max = 65504;
+constexpr _Float16 kFloat16Lowest = -kFloat16Max;
+
+namespace {
+
+template <typename T>
+inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) {
+    const Shape inputShape = context->getInputShape(kInputTensor);
+    const Shape axesShape = context->getInputShape(kInputAxes);
+    const Shape outputShape = context->getOutputShape(kOutputTensor);
+    const uint32_t inputRank = getNumberOfDimensions(inputShape);
+    const uint32_t numAxes = getNumberOfElements(axesShape);
+    std::vector<int> tempIndex(inputShape.dimensions.size());
+    std::vector<int> tempAxes(numAxes);
+    return tflite::reference_ops::ReduceGeneric<T>(
+            context->getInputBuffer<T>(kInputTensor),
+            reinterpret_cast<const int32_t*>(inputShape.dimensions.data()), inputRank,
+            context->getOutputBuffer<T>(kOutputTensor),
+            reinterpret_cast<const int32_t*>(outputShape.dimensions.data()),
+            outputShape.dimensions.size(), context->getInputBuffer<int32_t>(kInputAxes), numAxes,
+            context->getInputValue<bool8>(kInputKeepDims), tempIndex.data(), tempAxes.data(), init,
+            func);
+}
+
+}  // namespace
+
+bool validateProdSum(const IOperationValidationContext* context) {
+    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+    OperandType inputType = context->getInputType(kInputTensor);
+    NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
+                 inputType == OperandType::TENSOR_FLOAT32)
+            << "Unsupported tensor type for REDUCE_PROD or REDUCE_SUM";
+    NN_RET_CHECK(
+            validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));
+    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
+    return validateHalVersion(context, HalVersion::V1_2);
+}
+
+bool validateMaxMin(const IOperationValidationContext* context) {
+    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+    OperandType inputType = context->getInputType(kInputTensor);
+    NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
+                 inputType == OperandType::TENSOR_FLOAT32 ||
+                 inputType == OperandType::TENSOR_QUANT8_ASYMM)
+            << "Unsupported tensor type for REDUCE_MAX or REDUCE_MIN";
+    NN_RET_CHECK(
+            validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));
+    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
+    return validateHalVersion(context, HalVersion::V1_2);
+}
+
+bool validateLogical(const IOperationValidationContext* context) {
+    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+    OperandType inputType = context->getInputType(kInputTensor);
+    NN_RET_CHECK(inputType == OperandType::TENSOR_BOOL8)
+            << "Unsupported tensor type for REDUCE_ANY or REDUCE_ALL";
+    NN_RET_CHECK(
+            validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));
+    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
+    return validateHalVersion(context, HalVersion::V1_2);
+}
+
+bool prepare(IOperationExecutionContext* context) {
+    Shape inputShape = context->getInputShape(kInputTensor);
+    const uint32_t inputRank = getNumberOfDimensions(inputShape);
+
+    std::vector<bool> shouldReduce(inputRank);
+    const int32_t* axes = context->getInputBuffer<int32_t>(kInputAxes);
+    Shape axesShape = context->getInputShape(kInputAxes);
+    NN_RET_CHECK_EQ(getNumberOfDimensions(axesShape), 1u);
+    const uint32_t numAxes = getNumberOfElements(axesShape);
+    for (uint32_t i = 0; i < numAxes; ++i) {
+        int32_t axis = axes[i];
+        NN_RET_CHECK(handleNegativeAxis(inputRank, &axis));
+        shouldReduce[axis] = true;
+    }
+
+    // The output inherits the input's type and quantization parameters; only dimensions change.
+    Shape outputShape = inputShape;
+    outputShape.dimensions.clear();
+    bool keepDims = context->getInputValue<bool8>(kInputKeepDims);
+    for (uint32_t axis = 0; axis < inputRank; ++axis) {
+        if (shouldReduce[axis]) {
+            if (keepDims) {
+                outputShape.dimensions.push_back(1);
+            }
+        } else {
+            outputShape.dimensions.push_back(getSizeOfDimension(inputShape, axis));
+        }
+    }
+
+    return context->setOutputShape(kOutputTensor, outputShape);
+}
+
+bool executeProd(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_FLOAT16:
+            return compute<_Float16>(context, 1, [](_Float16 a, _Float16 b) { return a * b; });
+        case OperandType::TENSOR_FLOAT32:
+            return compute<float>(context, 1, [](float a, float b) { return a * b; });
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_PROD";
+    }
+}
+
+bool executeSum(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_FLOAT16:
+            return compute<_Float16>(context, 0, [](_Float16 a, _Float16 b) { return a + b; });
+        case OperandType::TENSOR_FLOAT32:
+            return compute<float>(context, 0, [](float a, float b) { return a + b; });
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_SUM";
+    }
+}
+
+bool executeMax(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_FLOAT16:
+            return compute<_Float16>(context, kFloat16Lowest,
+                                     [](_Float16 a, _Float16 b) { return std::max(a, b); });
+        case OperandType::TENSOR_FLOAT32:
+            return compute<float>(context, std::numeric_limits<float>::lowest(),
+                                  [](float a, float b) { return std::max(a, b); });
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return compute<uint8_t>(context, std::numeric_limits<uint8_t>::lowest(),
+                                    [](uint8_t a, uint8_t b) { return std::max(a, b); });
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_MAX";
+    }
+}
+
+bool executeMin(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_FLOAT16:
+            return compute<_Float16>(context, kFloat16Max,
+                                     [](_Float16 a, _Float16 b) { return std::min(a, b); });
+        case OperandType::TENSOR_FLOAT32:
+            return compute<float>(context, std::numeric_limits<float>::max(),
+                                  [](float a, float b) { return std::min(a, b); });
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return compute<uint8_t>(context, std::numeric_limits<uint8_t>::max(),
+                                    [](uint8_t a, uint8_t b) { return std::min(a, b); });
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_MIN";
+    }
+}
+
+bool executeAny(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_BOOL8:
+            return compute<bool8>(context, false,
+                                  [](bool8 a, bool8 b) { return static_cast<bool8>(a || b); });
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_ANY";
+    }
+}
+
+bool executeAll(IOperationExecutionContext* context) {
+    switch (context->getInputType(kInputTensor)) {
+        case OperandType::TENSOR_BOOL8:
+            return compute<bool8>(context, true,
+                                  [](bool8 a, bool8 b) { return static_cast<bool8>(a && b); });
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_ALL";
+    }
+}
+
+}  // namespace reduce
+
+NN_REGISTER_OPERATION(REDUCE_PROD, "REDUCE_PROD", reduce::validateProdSum, reduce::prepare,
+                      reduce::executeProd);
+NN_REGISTER_OPERATION(REDUCE_SUM, "REDUCE_SUM", reduce::validateProdSum, reduce::prepare,
+                      reduce::executeSum);
+NN_REGISTER_OPERATION(REDUCE_MAX, "REDUCE_MAX", reduce::validateMaxMin, reduce::prepare,
+                      reduce::executeMax);
+NN_REGISTER_OPERATION(REDUCE_MIN, "REDUCE_MIN", reduce::validateMaxMin, reduce::prepare,
+                      reduce::executeMin);
+NN_REGISTER_OPERATION(REDUCE_ANY, "REDUCE_ANY", reduce::validateLogical, reduce::prepare,
+                      reduce::executeAny);
+NN_REGISTER_OPERATION(REDUCE_ALL, "REDUCE_ALL", reduce::validateLogical, reduce::prepare,
+                      reduce::executeAll);
+
+}  // namespace nn
+}  // namespace android
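Since prepare() carries the only nontrivial shape logic in this file, here is
a standalone restatement of its rule as a hypothetical free function over
plain std::vector (illustration only): reduced axes are dropped, or retained
with length 1 when keep_dims is set, and every other dimension passes through
unchanged.

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> reducedShape(const std::vector<uint32_t>& dims,
                                       std::vector<int32_t> axes, bool keepDims) {
        const int32_t rank = static_cast<int32_t>(dims.size());
        std::vector<bool> shouldReduce(rank, false);
        for (int32_t axis : axes) {
            if (axis < 0) axis += rank;  // same normalization as handleNegativeAxis
            shouldReduce[axis] = true;
        }
        std::vector<uint32_t> out;
        for (int32_t i = 0; i < rank; ++i) {
            if (!shouldReduce[i]) {
                out.push_back(dims[i]);  // untouched dimension passes through
            } else if (keepDims) {
                out.push_back(1);        // reduced dimension retained as length 1
            }                            // otherwise the dimension is dropped
        }
        return out;
    }

    // reducedShape({2, 2, 2, 2}, {1, -1}, false) -> {2, 2}
    // reducedShape({2, 2, 2, 2}, {1, -1}, true)  -> {2, 1, 2, 1}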
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index ca14b6a..540505e 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -2866,7 +2866,32 @@
      */
     ANEURALNETWORKS_RANDOM_MULTINOMIAL = 72,
 
-    ANEURALNETWORKS_REDUCE = 73,
+    /**
+     * Reduces a tensor by multiplying elements along given dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_REDUCE_PROD = 73,
 
     /**
      * Select and scale the feature map of each region of interest to a unified
@@ -3249,6 +3274,7 @@
      * Available since API level 29.
      */
     ANEURALNETWORKS_ROI_POOLING = 89,
+
     /**
      * For input tensors x and y, computes x == y elementwise.
      *
@@ -3274,6 +3300,7 @@
      * Available since API level 29.
      */
     ANEURALNETWORKS_EQUAL = 90,
+
     /**
      * For input tensors x and y, computes x != y elementwise.
      *
@@ -3299,6 +3326,145 @@
      * Available since API level 29.
      */
     ANEURALNETWORKS_NOT_EQUAL = 91,
+
+    /**
+     * Reduces a tensor by summing elements along given dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_REDUCE_SUM = 92,
+
+    /**
+     * Reduces a tensor by computing the maximum of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_REDUCE_MAX = 93,
+
+    /**
+     * Reduces a tensor by computing the minimum of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_REDUCE_MIN = 94,
+
+    /**
+     * Reduces a tensor by computing the "logical or" of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_REDUCE_ANY = 95,
+
+    /**
+     * Reduces a tensor by computing the "logical and" of elements along given
+     * dimensions.
+     *
+     * If keep_dims is true, the reduced dimensions are
+     * retained with length 1. Otherwise, the rank of the tensor is reduced by
+     * 1 for each entry in dimensions.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+     *
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * * 0: An n-D tensor.
+     * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+     *      to reduce. Dimension values must be in the range [-n, n).
+     * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+     *      retains reduced dimensions with length 1.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_REDUCE_ALL = 96,
 } OperationCode;
 
 /**
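To make the keep_dims wording in the new documentation blocks concrete, a
worked example (input values are illustrative):

    // Input (shape [2, 2]):  [[1, 2],
    //                         [3, 4]]        axes = [0]
    //
    // REDUCE_SUM,  keep_dims = false -> [4, 6]    (shape [2])
    // REDUCE_SUM,  keep_dims = true  -> [[4, 6]]  (shape [1, 2])
    // REDUCE_MAX,  keep_dims = false -> [3, 4]
    // REDUCE_PROD, keep_dims = false -> [3, 8]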
diff --git a/runtime/test/TestValidateOperations.cpp b/runtime/test/TestValidateOperations.cpp
index 2ab8d12..dce0af1 100644
--- a/runtime/test/TestValidateOperations.cpp
+++ b/runtime/test/TestValidateOperations.cpp
@@ -1953,4 +1953,69 @@
     comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
 }
 
+void reduceOpTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
+    bool isQuant = inputOperandType == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
+    float scale = isQuant ? 1.f / 256 : 0.0f;
+    uint32_t inputDimensions[4] = {2, 2, 2, 2};
+    ANeuralNetworksOperandType input1 = {
+            .type = inputOperandType,
+            .dimensionCount = 4,
+            .dimensions = inputDimensions,
+            .scale = scale,
+            .zeroPoint = 0,
+    };
+    uint32_t axesDimensions[1] = {2};
+    ANeuralNetworksOperandType input2 = {
+            .type = ANEURALNETWORKS_TENSOR_INT32,
+            .dimensionCount = 1,
+            .dimensions = axesDimensions,
+    };
+    ANeuralNetworksOperandType input3 = {
+            .type = ANEURALNETWORKS_BOOL,
+            .dimensions = {},
+    };
+    ANeuralNetworksOperandType output = {
+            .type = inputOperandType,
+            .dimensionCount = 4,
+            .dimensions = inputDimensions,
+            .scale = scale,
+    };
+    OperationTestBase test(operationCode, {input1, input2, input3}, {output});
+
+    EXPECT_TRUE(test.testMutatingInputOperandCode());
+    EXPECT_TRUE(test.testMutatingInputOperandCounts());
+    EXPECT_TRUE(test.testMutatingOutputOperandCode());
+    EXPECT_TRUE(test.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, REDUCE_PROD) {
+    reduceOpTest(ANEURALNETWORKS_REDUCE_PROD, ANEURALNETWORKS_TENSOR_FLOAT16);
+    reduceOpTest(ANEURALNETWORKS_REDUCE_PROD, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, REDUCE_SUM) {
+    reduceOpTest(ANEURALNETWORKS_REDUCE_SUM, ANEURALNETWORKS_TENSOR_FLOAT16);
+    reduceOpTest(ANEURALNETWORKS_REDUCE_SUM, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, REDUCE_MAX) {
+    reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_FLOAT16);
+    reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_FLOAT32);
+    reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, REDUCE_MIN) {
+    reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_FLOAT16);
+    reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_FLOAT32);
+    reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, REDUCE_ANY) {
+    reduceOpTest(ANEURALNETWORKS_REDUCE_ANY, ANEURALNETWORKS_TENSOR_BOOL8);
+}
+
+TEST(OperationValidationTest, REDUCE_ALL) {
+    reduceOpTest(ANEURALNETWORKS_REDUCE_ALL, ANEURALNETWORKS_TENSOR_BOOL8);
+}
+
 }  // end namespace
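The validation tests above only exercise structural mutations (operand codes
and counts). For completeness, a hypothetical end-to-end sketch, continuing
the model built in the commit-message example (all names illustrative), of
compiling and running it through the synchronous API-29 path:

    ANeuralNetworksCompilation* compilation = nullptr;
    ANeuralNetworksCompilation_create(model, &compilation);
    ANeuralNetworksCompilation_finish(compilation);

    float input[6] = {1, 2, 3, 4, 5, 6};  // the 2x3 model input
    float output[2] = {};                 // receives the per-row sums

    ANeuralNetworksExecution* execution = nullptr;
    ANeuralNetworksExecution_create(compilation, &execution);
    ANeuralNetworksExecution_setInput(execution, 0, nullptr, input, sizeof(input));
    ANeuralNetworksExecution_setOutput(execution, 0, nullptr, output, sizeof(output));
    ANeuralNetworksExecution_compute(execution);  // synchronous, available since API 29

    // Expect output == {6, 15}: REDUCE_SUM over axis 1 with keep_dims = false.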
diff --git a/runtime/test/for-cts/TestGeneratedOneFile.cpp b/runtime/test/for-cts/TestGeneratedOneFile.cpp
index b2ba330..84bcad0 100644
--- a/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -391,6 +391,12 @@
 #include "../generated/tests/quantized_lstm.mod.py.cpp"
 #include "../generated/tests/random_multinomial.mod.py.cpp"
 #include "../generated/tests/random_multinomial_float16.mod.py.cpp"
+#include "../generated/tests/reduce_all.mod.py.cpp"
+#include "../generated/tests/reduce_any.mod.py.cpp"
+#include "../generated/tests/reduce_max.mod.py.cpp"
+#include "../generated/tests/reduce_min.mod.py.cpp"
+#include "../generated/tests/reduce_prod.mod.py.cpp"
+#include "../generated/tests/reduce_sum.mod.py.cpp"
 #include "../generated/tests/relu1_float16_1.mod.py.cpp"
 #include "../generated/tests/relu1_float16_2.mod.py.cpp"
 #include "../generated/tests/relu6_float16_1.mod.py.cpp"
diff --git a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index 0351f04..0af59b8 100644
--- a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -16759,6 +16759,984 @@
 }
 
 
+// Generated from: reduce_all.mod.py.
+namespace reduce_all {
+// Generated reduce_all test
+#include "examples/reduce_all.example.cpp"
+// Generated model constructor
+#include "vts_models/reduce_all.model.cpp"
+} // namespace reduce_all
+
+TEST_F(NeuralnetworksHidlTest, reduce_all) {
+  generated_tests::Execute(device,
+                           reduce_all::createTestModel,
+                           reduce_all::is_ignored,
+                           reduce_all::get_examples());
+}
+
+TEST_F(ValidationTest, reduce_all) {
+  const Model model = reduce_all::createTestModel();
+  const std::vector<Request> requests = createRequests(reduce_all::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_all_2) {
+  generated_tests::Execute(device,
+                           reduce_all::createTestModel_2,
+                           reduce_all::is_ignored_2,
+                           reduce_all::get_examples_2());
+}
+
+TEST_F(ValidationTest, reduce_all_2) {
+  const Model model = reduce_all::createTestModel_2();
+  const std::vector<Request> requests = createRequests(reduce_all::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_all_3) {
+  generated_tests::Execute(device,
+                           reduce_all::createTestModel_3,
+                           reduce_all::is_ignored_3,
+                           reduce_all::get_examples_3());
+}
+
+TEST_F(ValidationTest, reduce_all_3) {
+  const Model model = reduce_all::createTestModel_3();
+  const std::vector<Request> requests = createRequests(reduce_all::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: reduce_any.mod.py.
+namespace reduce_any {
+// Generated reduce_any test
+#include "examples/reduce_any.example.cpp"
+// Generated model constructor
+#include "vts_models/reduce_any.model.cpp"
+} // namespace reduce_any
+
+TEST_F(NeuralnetworksHidlTest, reduce_any) {
+  generated_tests::Execute(device,
+                           reduce_any::createTestModel,
+                           reduce_any::is_ignored,
+                           reduce_any::get_examples());
+}
+
+TEST_F(ValidationTest, reduce_any) {
+  const Model model = reduce_any::createTestModel();
+  const std::vector<Request> requests = createRequests(reduce_any::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_any_2) {
+  generated_tests::Execute(device,
+                           reduce_any::createTestModel_2,
+                           reduce_any::is_ignored_2,
+                           reduce_any::get_examples_2());
+}
+
+TEST_F(ValidationTest, reduce_any_2) {
+  const Model model = reduce_any::createTestModel_2();
+  const std::vector<Request> requests = createRequests(reduce_any::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_any_3) {
+  generated_tests::Execute(device,
+                           reduce_any::createTestModel_3,
+                           reduce_any::is_ignored_3,
+                           reduce_any::get_examples_3());
+}
+
+TEST_F(ValidationTest, reduce_any_3) {
+  const Model model = reduce_any::createTestModel_3();
+  const std::vector<Request> requests = createRequests(reduce_any::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: reduce_max.mod.py.
+namespace reduce_max {
+// Generated reduce_max test
+#include "examples/reduce_max.example.cpp"
+// Generated model constructor
+#include "vts_models/reduce_max.model.cpp"
+} // namespace reduce_max
+
+TEST_F(NeuralnetworksHidlTest, reduce_max) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel,
+                           reduce_max::is_ignored,
+                           reduce_max::get_examples());
+}
+
+TEST_F(ValidationTest, reduce_max) {
+  const Model model = reduce_max::createTestModel();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_relaxed) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_relaxed,
+                           reduce_max::is_ignored_relaxed,
+                           reduce_max::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, reduce_max_relaxed) {
+  const Model model = reduce_max::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_float16) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_float16,
+                           reduce_max::is_ignored_float16,
+                           reduce_max::get_examples_float16());
+}
+
+TEST_F(ValidationTest, reduce_max_float16) {
+  const Model model = reduce_max::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_quant8) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_quant8,
+                           reduce_max::is_ignored_quant8,
+                           reduce_max::get_examples_quant8());
+}
+
+TEST_F(ValidationTest, reduce_max_quant8) {
+  const Model model = reduce_max::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_2) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_2,
+                           reduce_max::is_ignored_2,
+                           reduce_max::get_examples_2());
+}
+
+TEST_F(ValidationTest, reduce_max_2) {
+  const Model model = reduce_max::createTestModel_2();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_relaxed_2) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_relaxed_2,
+                           reduce_max::is_ignored_relaxed_2,
+                           reduce_max::get_examples_relaxed_2());
+}
+
+TEST_F(ValidationTest, reduce_max_relaxed_2) {
+  const Model model = reduce_max::createTestModel_relaxed_2();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_float16_2) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_float16_2,
+                           reduce_max::is_ignored_float16_2,
+                           reduce_max::get_examples_float16_2());
+}
+
+TEST_F(ValidationTest, reduce_max_float16_2) {
+  const Model model = reduce_max::createTestModel_float16_2();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_float16_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_quant8_2) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_quant8_2,
+                           reduce_max::is_ignored_quant8_2,
+                           reduce_max::get_examples_quant8_2());
+}
+
+TEST_F(ValidationTest, reduce_max_quant8_2) {
+  const Model model = reduce_max::createTestModel_quant8_2();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_quant8_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_3) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_3,
+                           reduce_max::is_ignored_3,
+                           reduce_max::get_examples_3());
+}
+
+TEST_F(ValidationTest, reduce_max_3) {
+  const Model model = reduce_max::createTestModel_3();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_relaxed_3) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_relaxed_3,
+                           reduce_max::is_ignored_relaxed_3,
+                           reduce_max::get_examples_relaxed_3());
+}
+
+TEST_F(ValidationTest, reduce_max_relaxed_3) {
+  const Model model = reduce_max::createTestModel_relaxed_3();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_float16_3) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_float16_3,
+                           reduce_max::is_ignored_float16_3,
+                           reduce_max::get_examples_float16_3());
+}
+
+TEST_F(ValidationTest, reduce_max_float16_3) {
+  const Model model = reduce_max::createTestModel_float16_3();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_float16_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_quant8_3) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_quant8_3,
+                           reduce_max::is_ignored_quant8_3,
+                           reduce_max::get_examples_quant8_3());
+}
+
+TEST_F(ValidationTest, reduce_max_quant8_3) {
+  const Model model = reduce_max::createTestModel_quant8_3();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_quant8_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_4) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_4,
+                           reduce_max::is_ignored_4,
+                           reduce_max::get_examples_4());
+}
+
+TEST_F(ValidationTest, reduce_max_4) {
+  const Model model = reduce_max::createTestModel_4();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_relaxed_4) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_relaxed_4,
+                           reduce_max::is_ignored_relaxed_4,
+                           reduce_max::get_examples_relaxed_4());
+}
+
+TEST_F(ValidationTest, reduce_max_relaxed_4) {
+  const Model model = reduce_max::createTestModel_relaxed_4();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_float16_4) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_float16_4,
+                           reduce_max::is_ignored_float16_4,
+                           reduce_max::get_examples_float16_4());
+}
+
+TEST_F(ValidationTest, reduce_max_float16_4) {
+  const Model model = reduce_max::createTestModel_float16_4();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_float16_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_max_quant8_4) {
+  generated_tests::Execute(device,
+                           reduce_max::createTestModel_quant8_4,
+                           reduce_max::is_ignored_quant8_4,
+                           reduce_max::get_examples_quant8_4());
+}
+
+TEST_F(ValidationTest, reduce_max_quant8_4) {
+  const Model model = reduce_max::createTestModel_quant8_4();
+  const std::vector<Request> requests = createRequests(reduce_max::get_examples_quant8_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: reduce_min.mod.py.
+namespace reduce_min {
+// Generated reduce_min test
+#include "examples/reduce_min.example.cpp"
+// Generated model constructor
+#include "vts_models/reduce_min.model.cpp"
+} // namespace reduce_min
+
+TEST_F(NeuralnetworksHidlTest, reduce_min) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel,
+                           reduce_min::is_ignored,
+                           reduce_min::get_examples());
+}
+
+TEST_F(ValidationTest, reduce_min) {
+  const Model model = reduce_min::createTestModel();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_relaxed) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_relaxed,
+                           reduce_min::is_ignored_relaxed,
+                           reduce_min::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, reduce_min_relaxed) {
+  const Model model = reduce_min::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_float16) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_float16,
+                           reduce_min::is_ignored_float16,
+                           reduce_min::get_examples_float16());
+}
+
+TEST_F(ValidationTest, reduce_min_float16) {
+  const Model model = reduce_min::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_quant8) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_quant8,
+                           reduce_min::is_ignored_quant8,
+                           reduce_min::get_examples_quant8());
+}
+
+TEST_F(ValidationTest, reduce_min_quant8) {
+  const Model model = reduce_min::createTestModel_quant8();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_2) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_2,
+                           reduce_min::is_ignored_2,
+                           reduce_min::get_examples_2());
+}
+
+TEST_F(ValidationTest, reduce_min_2) {
+  const Model model = reduce_min::createTestModel_2();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_relaxed_2) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_relaxed_2,
+                           reduce_min::is_ignored_relaxed_2,
+                           reduce_min::get_examples_relaxed_2());
+}
+
+TEST_F(ValidationTest, reduce_min_relaxed_2) {
+  const Model model = reduce_min::createTestModel_relaxed_2();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_float16_2) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_float16_2,
+                           reduce_min::is_ignored_float16_2,
+                           reduce_min::get_examples_float16_2());
+}
+
+TEST_F(ValidationTest, reduce_min_float16_2) {
+  const Model model = reduce_min::createTestModel_float16_2();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_float16_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_quant8_2) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_quant8_2,
+                           reduce_min::is_ignored_quant8_2,
+                           reduce_min::get_examples_quant8_2());
+}
+
+TEST_F(ValidationTest, reduce_min_quant8_2) {
+  const Model model = reduce_min::createTestModel_quant8_2();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_quant8_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_3) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_3,
+                           reduce_min::is_ignored_3,
+                           reduce_min::get_examples_3());
+}
+
+TEST_F(ValidationTest, reduce_min_3) {
+  const Model model = reduce_min::createTestModel_3();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_relaxed_3) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_relaxed_3,
+                           reduce_min::is_ignored_relaxed_3,
+                           reduce_min::get_examples_relaxed_3());
+}
+
+TEST_F(ValidationTest, reduce_min_relaxed_3) {
+  const Model model = reduce_min::createTestModel_relaxed_3();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_float16_3) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_float16_3,
+                           reduce_min::is_ignored_float16_3,
+                           reduce_min::get_examples_float16_3());
+}
+
+TEST_F(ValidationTest, reduce_min_float16_3) {
+  const Model model = reduce_min::createTestModel_float16_3();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_float16_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_quant8_3) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_quant8_3,
+                           reduce_min::is_ignored_quant8_3,
+                           reduce_min::get_examples_quant8_3());
+}
+
+TEST_F(ValidationTest, reduce_min_quant8_3) {
+  const Model model = reduce_min::createTestModel_quant8_3();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_quant8_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_4) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_4,
+                           reduce_min::is_ignored_4,
+                           reduce_min::get_examples_4());
+}
+
+TEST_F(ValidationTest, reduce_min_4) {
+  const Model model = reduce_min::createTestModel_4();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_relaxed_4) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_relaxed_4,
+                           reduce_min::is_ignored_relaxed_4,
+                           reduce_min::get_examples_relaxed_4());
+}
+
+TEST_F(ValidationTest, reduce_min_relaxed_4) {
+  const Model model = reduce_min::createTestModel_relaxed_4();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_float16_4) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_float16_4,
+                           reduce_min::is_ignored_float16_4,
+                           reduce_min::get_examples_float16_4());
+}
+
+TEST_F(ValidationTest, reduce_min_float16_4) {
+  const Model model = reduce_min::createTestModel_float16_4();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_float16_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_min_quant8_4) {
+  generated_tests::Execute(device,
+                           reduce_min::createTestModel_quant8_4,
+                           reduce_min::is_ignored_quant8_4,
+                           reduce_min::get_examples_quant8_4());
+}
+
+TEST_F(ValidationTest, reduce_min_quant8_4) {
+  const Model model = reduce_min::createTestModel_quant8_4();
+  const std::vector<Request> requests = createRequests(reduce_min::get_examples_quant8_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: reduce_prod.mod.py.
+namespace reduce_prod {
+// Generated reduce_prod test
+#include "examples/reduce_prod.example.cpp"
+// Generated model constructor
+#include "vts_models/reduce_prod.model.cpp"
+} // namespace reduce_prod
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel,
+                           reduce_prod::is_ignored,
+                           reduce_prod::get_examples());
+}
+
+TEST_F(ValidationTest, reduce_prod) {
+  const Model model = reduce_prod::createTestModel();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_relaxed) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_relaxed,
+                           reduce_prod::is_ignored_relaxed,
+                           reduce_prod::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, reduce_prod_relaxed) {
+  const Model model = reduce_prod::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_float16) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_float16,
+                           reduce_prod::is_ignored_float16,
+                           reduce_prod::get_examples_float16());
+}
+
+TEST_F(ValidationTest, reduce_prod_float16) {
+  const Model model = reduce_prod::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_2) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_2,
+                           reduce_prod::is_ignored_2,
+                           reduce_prod::get_examples_2());
+}
+
+TEST_F(ValidationTest, reduce_prod_2) {
+  const Model model = reduce_prod::createTestModel_2();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_relaxed_2) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_relaxed_2,
+                           reduce_prod::is_ignored_relaxed_2,
+                           reduce_prod::get_examples_relaxed_2());
+}
+
+TEST_F(ValidationTest, reduce_prod_relaxed_2) {
+  const Model model = reduce_prod::createTestModel_relaxed_2();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_float16_2) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_float16_2,
+                           reduce_prod::is_ignored_float16_2,
+                           reduce_prod::get_examples_float16_2());
+}
+
+TEST_F(ValidationTest, reduce_prod_float16_2) {
+  const Model model = reduce_prod::createTestModel_float16_2();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_float16_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_3) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_3,
+                           reduce_prod::is_ignored_3,
+                           reduce_prod::get_examples_3());
+}
+
+TEST_F(ValidationTest, reduce_prod_3) {
+  const Model model = reduce_prod::createTestModel_3();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_relaxed_3) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_relaxed_3,
+                           reduce_prod::is_ignored_relaxed_3,
+                           reduce_prod::get_examples_relaxed_3());
+}
+
+TEST_F(ValidationTest, reduce_prod_relaxed_3) {
+  const Model model = reduce_prod::createTestModel_relaxed_3();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_float16_3) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_float16_3,
+                           reduce_prod::is_ignored_float16_3,
+                           reduce_prod::get_examples_float16_3());
+}
+
+TEST_F(ValidationTest, reduce_prod_float16_3) {
+  const Model model = reduce_prod::createTestModel_float16_3();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_float16_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_4) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_4,
+                           reduce_prod::is_ignored_4,
+                           reduce_prod::get_examples_4());
+}
+
+TEST_F(ValidationTest, reduce_prod_4) {
+  const Model model = reduce_prod::createTestModel_4();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_relaxed_4) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_relaxed_4,
+                           reduce_prod::is_ignored_relaxed_4,
+                           reduce_prod::get_examples_relaxed_4());
+}
+
+TEST_F(ValidationTest, reduce_prod_relaxed_4) {
+  const Model model = reduce_prod::createTestModel_relaxed_4();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_prod_float16_4) {
+  generated_tests::Execute(device,
+                           reduce_prod::createTestModel_float16_4,
+                           reduce_prod::is_ignored_float16_4,
+                           reduce_prod::get_examples_float16_4());
+}
+
+TEST_F(ValidationTest, reduce_prod_float16_4) {
+  const Model model = reduce_prod::createTestModel_float16_4();
+  const std::vector<Request> requests = createRequests(reduce_prod::get_examples_float16_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+// Generated from: reduce_sum.mod.py.
+namespace reduce_sum {
+// Generated reduce_sum test
+#include "examples/reduce_sum.example.cpp"
+// Generated model constructor
+#include "vts_models/reduce_sum.model.cpp"
+} // namespace reduce_sum
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel,
+                           reduce_sum::is_ignored,
+                           reduce_sum::get_examples());
+}
+
+TEST_F(ValidationTest, reduce_sum) {
+  const Model model = reduce_sum::createTestModel();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_relaxed) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_relaxed,
+                           reduce_sum::is_ignored_relaxed,
+                           reduce_sum::get_examples_relaxed());
+}
+
+TEST_F(ValidationTest, reduce_sum_relaxed) {
+  const Model model = reduce_sum::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_float16) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_float16,
+                           reduce_sum::is_ignored_float16,
+                           reduce_sum::get_examples_float16());
+}
+
+TEST_F(ValidationTest, reduce_sum_float16) {
+  const Model model = reduce_sum::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_2) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_2,
+                           reduce_sum::is_ignored_2,
+                           reduce_sum::get_examples_2());
+}
+
+TEST_F(ValidationTest, reduce_sum_2) {
+  const Model model = reduce_sum::createTestModel_2();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_relaxed_2) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_relaxed_2,
+                           reduce_sum::is_ignored_relaxed_2,
+                           reduce_sum::get_examples_relaxed_2());
+}
+
+TEST_F(ValidationTest, reduce_sum_relaxed_2) {
+  const Model model = reduce_sum::createTestModel_relaxed_2();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_relaxed_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_float16_2) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_float16_2,
+                           reduce_sum::is_ignored_float16_2,
+                           reduce_sum::get_examples_float16_2());
+}
+
+TEST_F(ValidationTest, reduce_sum_float16_2) {
+  const Model model = reduce_sum::createTestModel_float16_2();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_float16_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_3) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_3,
+                           reduce_sum::is_ignored_3,
+                           reduce_sum::get_examples_3());
+}
+
+TEST_F(ValidationTest, reduce_sum_3) {
+  const Model model = reduce_sum::createTestModel_3();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_relaxed_3) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_relaxed_3,
+                           reduce_sum::is_ignored_relaxed_3,
+                           reduce_sum::get_examples_relaxed_3());
+}
+
+TEST_F(ValidationTest, reduce_sum_relaxed_3) {
+  const Model model = reduce_sum::createTestModel_relaxed_3();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_relaxed_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_float16_3) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_float16_3,
+                           reduce_sum::is_ignored_float16_3,
+                           reduce_sum::get_examples_float16_3());
+}
+
+TEST_F(ValidationTest, reduce_sum_float16_3) {
+  const Model model = reduce_sum::createTestModel_float16_3();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_float16_3());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_4) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_4,
+                           reduce_sum::is_ignored_4,
+                           reduce_sum::get_examples_4());
+}
+
+TEST_F(ValidationTest, reduce_sum_4) {
+  const Model model = reduce_sum::createTestModel_4();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_relaxed_4) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_relaxed_4,
+                           reduce_sum::is_ignored_relaxed_4,
+                           reduce_sum::get_examples_relaxed_4());
+}
+
+TEST_F(ValidationTest, reduce_sum_relaxed_4) {
+  const Model model = reduce_sum::createTestModel_relaxed_4();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_relaxed_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, reduce_sum_float16_4) {
+  generated_tests::Execute(device,
+                           reduce_sum::createTestModel_float16_4,
+                           reduce_sum::is_ignored_float16_4,
+                           reduce_sum::get_examples_float16_4());
+}
+
+TEST_F(ValidationTest, reduce_sum_float16_4) {
+  const Model model = reduce_sum::createTestModel_float16_4();
+  const std::vector<Request> requests = createRequests(reduce_sum::get_examples_float16_4());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
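The tests above are fully mechanical: each variant of the example (base, relaxed, float16, and the _2 through _4 sub-cases) gets one NeuralnetworksHidlTest that executes the model through generated_tests::Execute against the golden examples, plus one ValidationTest that pushes the same model and requests through validateModel/validateRequests. A minimal standalone sketch of that pairing, with hypothetical stand-ins (Model, Example, VariantSketch) for the real harness types:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for the VTS harness types; the real ones come
// from the test harness, not from this diff.
struct Model {};
struct Example {};

struct VariantSketch {
    std::string name;
    std::function<Model()> createTestModel;                     // e.g. createTestModel_relaxed
    std::function<const std::vector<Example>&()> getExamples;   // e.g. get_examples_relaxed
};

int main() {
    static const std::vector<Example> goldens(1);
    const auto makeModel = [] { return Model{}; };
    const auto examples = [&]() -> const std::vector<Example>& { return goldens; };
    const std::vector<VariantSketch> variants = {
        {"reduce_sum", makeModel, examples},
        {"reduce_sum_relaxed", makeModel, examples},
        {"reduce_sum_float16", makeModel, examples},
    };
    for (const auto& v : variants) {
        const Model model = v.createTestModel();  // what the HIDL test executes...
        const auto& ex = v.getExamples();         // ...and what validation replays as requests
        std::cout << v.name << ": " << ex.size() << " golden example(s)\n";
        (void)model;
    }
}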
+
 // Generated from: relu1_float16_1.mod.py.
 namespace relu1_float16_1 {
 // Generated relu1_float16_1 test
diff --git a/runtime/test/generated/examples/reduce_all.example.cpp b/runtime/test/generated/examples/reduce_all.example.cpp
new file mode 100644
index 0000000..5d05d1e
--- /dev/null
+++ b/runtime/test/generated/examples/reduce_all.example.cpp
@@ -0,0 +1,125 @@
+// clang-format off
+// Generated file (from: reduce_all.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false}}},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {true, true, true, true, true, false, true, true, true, true, true, true}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {true, false}}},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {true, true, true, true, true, true, true, true, false, true, true, true}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {true, false, true}}},
+}
+},
+}, // End of an example
+};
+return examples_3;
+};
+
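REDUCE_ALL operates on BOOL8 tensors, which is why only the BOOL8 maps above are populated; per TestHarness.h, each map is keyed by the operand's input/output index within its type. The axes being reduced live in reduce_all.mod.py's model constructor, not in this file, but the data of get_examples_2() are consistent with reducing every axis except an innermost axis of size 2: output[k] is the AND of all elements whose innermost index is k. A small sketch that reproduces the golden output {true, false} under that (assumed) shape reading:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Input of get_examples_2() above, flattened.
    const std::vector<bool> in = {true, true, true, true, true, false,
                                  true, true, true, true, true, true};
    // AND-reduce everything except an innermost axis of size 2.
    const std::size_t kept = 2;
    std::vector<bool> out(kept, true);
    for (std::size_t i = 0; i < in.size(); ++i) {
        out[i % kept] = out[i % kept] && in[i];
    }
    // Even-indexed elements are all true; odd-indexed include one false.
    std::cout << std::boolalpha << out[0] << " " << out[1] << "\n";  // true false
}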
diff --git a/runtime/test/generated/examples/reduce_any.example.cpp b/runtime/test/generated/examples/reduce_any.example.cpp
new file mode 100644
index 0000000..a4fa8c1
--- /dev/null
+++ b/runtime/test/generated/examples/reduce_any.example.cpp
@@ -0,0 +1,125 @@
+// clang-format off
+// Generated file (from: reduce_any.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false}}},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false, false, false, false, false, false, false, true, false, false, false, true}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false, true}}},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {false, false, false, false, false, false, false, true, false, false, false, true}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {{0, {true, false, true}}},
+}
+},
+}, // End of an example
+};
+return examples_3;
+};
+
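REDUCE_ANY is the OR-reduction dual of REDUCE_ALL, and get_examples_2() and get_examples_3() above share the same flat input while producing differently shaped outputs, so they must reduce along different axes. The axes are defined in reduce_any.mod.py rather than here; one consistent reading is that example 2 keeps an innermost size-2 axis and example 3 keeps the middle axis of a [2, 3, 2] input. A sketch reproducing both goldens under those assumptions:

#include <array>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Shared input of get_examples_2() / get_examples_3() above.
    const std::vector<bool> in = {false, false, false, false, false, false,
                                  false, true,  false, false, false, true};
    // Example 2: keep an innermost axis of size 2 -> {false, true}.
    std::array<bool, 2> any2 = {false, false};
    // Example 3 (assumed [2, 3, 2] input, middle axis kept) -> {true, false, true}.
    std::array<bool, 3> any3 = {false, false, false};
    for (std::size_t i = 0; i < in.size(); ++i) {
        any2[i % 2] = any2[i % 2] || in[i];
        any3[(i / 2) % 3] = any3[(i / 2) % 3] || in[i];
    }
    std::cout << std::boolalpha << any2[0] << " " << any2[1] << " | "
              << any3[0] << " " << any3[1] << " " << any3[2] << "\n";
    // Prints: false true | true false true
}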
diff --git a/runtime/test/generated/examples/reduce_max.example.cpp b/runtime/test/generated/examples/reduce_max.example.cpp
new file mode 100644
index 0000000..c122c18
--- /dev/null
+++ b/runtime/test/generated/examples/reduce_max.example.cpp
@@ -0,0 +1,658 @@
+// clang-format off
+// Generated file (from: reduce_max.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, 4.0f, 5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, 4.0f, 5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-1.0f, 4.0f, 5.0f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {125, 123, 133, 135, 137, 115}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {125, 135, 137}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {146}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {146}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_3() {
+static std::vector<MixedTypedExample> examples_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_3() {
+static std::vector<MixedTypedExample> examples_quant8_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {127, 127, 128, 128, 128, 128, 128, 129, 129, 129, 129, 129, 130, 130, 130, 130, 130, 131, 131, 131, 131, 131, 132, 132}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {132, 132}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 2.2f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 2.2f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_4() {
+static std::vector<MixedTypedExample> examples_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.0f, 2.200000047683716f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_4() {
+static std::vector<MixedTypedExample> examples_quant8_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {127, 127, 128, 128, 128, 128, 128, 129, 129, 129, 129, 129, 130, 130, 130, 130, 130, 131, 131, 131, 131, 131, 132, 132}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {131, 131, 132}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_4;
+};
+
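The float32, relaxed, float16, and quant8 variants above all encode the same reduction; the quant8 data additionally show that the maximum is taken directly on the stored uint8 values ({125, 123}, {133, 135}, {137, 115} -> {125, 135, 137}). That works because dequantization q -> scale * (q - zeroPoint) is monotonic for scale > 0, so max commutes with it; the actual scale and zeroPoint are defined in reduce_max.mod.py and are not needed here. A sketch of that quantized-space reduction:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Quant8 input of get_examples_quant8() above, read as three rows of two.
    const std::vector<uint8_t> in = {125, 123, 133, 135, 137, 115};
    // Max over each row, computed on raw quantized values: valid because
    // affine dequantization with positive scale preserves ordering.
    std::vector<uint8_t> out;
    for (std::size_t i = 0; i < in.size(); i += 2) {
        out.push_back(std::max(in[i], in[i + 1]));
    }
    for (uint8_t q : out) std::cout << int(q) << " ";  // 125 135 137
    std::cout << "\n";
}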
diff --git a/runtime/test/generated/examples/reduce_min.example.cpp b/runtime/test/generated/examples/reduce_min.example.cpp
new file mode 100644
index 0000000..1d21d3c
--- /dev/null
+++ b/runtime/test/generated/examples/reduce_min.example.cpp
@@ -0,0 +1,658 @@
+// clang-format off
+// Generated file (from: reduce_min.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, 3.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, 3.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-2.0f, 3.0f, -6.0f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8() {
+static std::vector<MixedTypedExample> examples_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {125, 123, 133, 135, 137, 115}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {123, 133, 115}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_2() {
+static std::vector<MixedTypedExample> examples_quant8_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {146}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {146}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_3() {
+static std::vector<MixedTypedExample> examples_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_3() {
+static std::vector<MixedTypedExample> examples_quant8_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {127, 127, 128, 128, 128, 128, 128, 129, 129, 129, 129, 129, 130, 130, 130, 130, 130, 131, 131, 131, 131, 131, 132, 132}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {127, 127}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.3f, 0.5f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.3f, 0.5f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_4() {
+static std::vector<MixedTypedExample> examples_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.30000001192092896f, 0.5f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_quant8_4() {
+static std::vector<MixedTypedExample> examples_quant8_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {127, 127, 128, 128, 128, 128, 128, 129, 129, 129, 129, 129, 130, 130, 130, 130, 130, 131, 131, 131, 131, 131, 132, 132}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {127, 128, 128}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_quant8_4;
+};
+
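REDUCE_MIN mirrors REDUCE_MAX with min in place of max: get_examples() above takes the per-row minimum of a [3, 2] view of the input, and get_examples_2() shows the degenerate case where reducing a single-element tensor passes the value through unchanged ({9.527} -> {9.527}, quant8 {146} -> {146}; as I read the NNAPI spec, quantized REDUCE_MIN/MAX keep the input's scale and zeroPoint, which is why the byte is untouched). A minimal sketch of the row-minimum case, assuming the [3, 2] shape from the model file:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // FLOAT32 input of get_examples() above, read as three rows of two.
    const std::vector<float> in = {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f};
    std::vector<float> out;
    for (std::size_t i = 0; i < in.size(); i += 2) {
        out.push_back(std::min(in[i], in[i + 1]));  // -2, 3, -6
    }
    for (float v : out) std::cout << v << " ";
    std::cout << "\n";
}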
diff --git a/runtime/test/generated/examples/reduce_prod.example.cpp b/runtime/test/generated/examples/reduce_prod.example.cpp
new file mode 100644
index 0000000..fa214cd
--- /dev/null
+++ b/runtime/test/generated/examples/reduce_prod.example.cpp
@@ -0,0 +1,494 @@
+// clang-format off
+// Generated file (from: reduce_prod.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 12.0f, -30.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 12.0f, -30.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.0f, 12.0f, -30.0f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {31623.4143225f, 19619.905536f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {31623.4143225f, 19619.905536f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_3() {
+static std::vector<MixedTypedExample> examples_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {31623.4140625f, 19619.90625f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {774.592f, 1197.504f, 668.89152f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {774.592f, 1197.504f, 668.89152f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_4() {
+static std::vector<MixedTypedExample> examples_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {774.5919799804688f, 1197.5040283203125f, 668.8915405273438f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_4;
+};
+
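The products in get_examples_3() above can be checked by hand: keeping an innermost axis of size 2 and multiplying everything else gives 1*3*5*7*9*1.1*1.3*1.5*1.7*1.9*2.1*2.3 = 31623.4143225 for the even stride and 2*4*6*8*1.0*1.2*1.4*1.6*1.8*2.0*2.2*2.4 = 19619.905536 for the odd one, matching the goldens to the digits printed (modulo double rounding). A sketch of that strided product, under the same assumed axis reading:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // FLOAT32 input of get_examples_3() above.
    const std::vector<double> in = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
                                    9.0, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
                                    1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4};
    // Multiply across everything except an innermost axis of size 2.
    double out[2] = {1.0, 1.0};
    for (std::size_t i = 0; i < in.size(); ++i) out[i % 2] *= in[i];
    std::printf("%.7f %.6f\n", out[0], out[1]);  // 31623.4143225 19619.905536
}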
diff --git a/runtime/test/generated/examples/reduce_sum.example.cpp b/runtime/test/generated/examples/reduce_sum.example.cpp
new file mode 100644
index 0000000..2b944b0
--- /dev/null
+++ b/runtime/test/generated/examples/reduce_sum.example.cpp
@@ -0,0 +1,494 @@
+// clang-format off
+// Generated file (from: reduce_sum.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-3.0f, 7.0f, -1.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed() {
+static std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-3.0f, 7.0f, -1.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-1.0f, -2.0f, 3.0f, 4.0f, 5.0f, -6.0f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {-3.0f, 7.0f, -1.0f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_2() {
+static std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {9.527f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_2() {
+static std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {9.527000427246094f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_3() {
+static std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {14.4f, 15.6f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_3() {
+static std::vector<MixedTypedExample> examples_relaxed_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {14.4f, 15.6f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_3() {
+static std::vector<MixedTypedExample> examples_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {14.399999618530273f, 15.600000381469727f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_3;
+};
+
+std::vector<MixedTypedExample>& get_examples_4() {
+static std::vector<MixedTypedExample> examples_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8.4f, 10.0f, 11.6f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_relaxed_4() {
+static std::vector<MixedTypedExample> examples_relaxed_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f, 1.9f, 2.0f, 2.1f, 2.2f, 2.3f, 2.4f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {8.4f, 10.0f, 11.6f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_relaxed_4;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16_4() {
+static std::vector<MixedTypedExample> examples_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {0.10000000149011612f, 0.20000000298023224f, 0.30000001192092896f, 0.4000000059604645f, 0.5f, 0.6000000238418579f, 0.699999988079071f, 0.800000011920929f, 0.8999999761581421f, 1.0f, 1.100000023841858f, 1.2000000476837158f, 1.2999999523162842f, 1.399999976158142f, 1.5f, 1.600000023841858f, 1.7000000476837158f, 1.7999999523162842f, 1.899999976158142f, 2.0f, 2.0999999046325684f, 2.200000047683716f, 2.299999952316284f, 2.4000000953674316f}}},
+  // int -> BOOL8 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {8.399999618530273f, 10.0f, 11.600000381469727f}}},
+  // int -> BOOL8 map
+  {},
+}
+},
+}, // End of an example
+};
+return examples_float16_4;
+};
+
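
The expected values above are easy to cross-check by hand: the outputs {14.4f, 15.6f} and {8.4f, 10.0f, 11.6f} are plain sums of the 0.1f..2.4f ramp over the reduced axes of the {4, 3, 2} input (two surviving elements means only the last dimension is kept; three means only the middle one). A standalone C++ sketch that recomputes them, illustrative only and independent of the generated harness:

#include <cassert>
#include <cmath>
#include <cstdio>

// Standalone sanity check (not part of the generated tests): recompute the
// expected REDUCE_SUM outputs for the {4, 3, 2} ramp input used in
// get_examples_3 and get_examples_4 above.
int main() {
    float in[4][3][2];
    float v = 0.1f;
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 3; ++j) {
            for (int k = 0; k < 2; ++k) {
                in[i][j][k] = v;
                v += 0.1f;
            }
        }
    }
    // Reducing over the first two axes leaves only the last dimension.
    float lastDim[2] = {0.0f, 0.0f};
    // Reducing over the outer two axes leaves only the middle dimension.
    float midDim[3] = {0.0f, 0.0f, 0.0f};
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 3; ++j) {
            for (int k = 0; k < 2; ++k) {
                lastDim[k] += in[i][j][k];
                midDim[j] += in[i][j][k];
            }
        }
    }
    assert(std::fabs(lastDim[0] - 14.4f) < 1e-3f);  // expected 14.4f
    assert(std::fabs(lastDim[1] - 15.6f) < 1e-3f);  // expected 15.6f
    assert(std::fabs(midDim[0] - 8.4f) < 1e-3f);    // expected 8.4f
    assert(std::fabs(midDim[1] - 10.0f) < 1e-3f);   // expected 10.0f
    assert(std::fabs(midDim[2] - 11.6f) < 1e-3f);   // expected 11.6f
    std::printf("reduce_sum expected values check out\n");
    return 0;
}
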
diff --git a/runtime/test/generated/models/reduce_all.model.cpp b/runtime/test/generated/models/reduce_all.model.cpp
new file mode 100644
index 0000000..d1deb8b
--- /dev/null
+++ b/runtime/test/generated/models/reduce_all.model.cpp
@@ -0,0 +1,85 @@
+// clang-format off
+// Generated file (from: reduce_all.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_BOOL8, {1});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type1);
+  auto param1 = model->addOperand(&type2);
+  auto output0 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {true};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_ALL, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type2(Type::BOOL, {});
+  OperandType type3(Type::TENSOR_BOOL8, {2, 3, 2});
+  OperandType type4(Type::TENSOR_BOOL8, {2});
+  OperandType type5(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type3);
+  auto param2 = model->addOperand(&type5);
+  auto param3 = model->addOperand(&type2);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 4);
+  static bool8 param3_init[] = {false};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_ALL, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type2(Type::BOOL, {});
+  OperandType type3(Type::TENSOR_BOOL8, {2, 3, 2});
+  OperandType type6(Type::TENSOR_BOOL8, {1, 3, 1});
+  OperandType type7(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type3);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type2);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {0, 2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 2);
+  static bool8 param5_init[] = {true};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_ALL, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
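
CreateModel_2 above feeds REDUCE_ALL the axis list {1, 0, -3, -3} on a rank-3 input, deliberately mixing negative and duplicate axes: -3 wraps around to 0 and repeats collapse, so the effective reduction set is {0, 1} and the {2, 3, 2} input reduces to shape {2}. A minimal sketch of that wrap-and-dedup convention (an illustrative helper, not the actual validation code in Reduce.cpp):

#include <cstdint>
#include <set>
#include <vector>

// Normalize a REDUCE_* axis list the way the tests above assume: negative
// axes count from the back (axis + rank) and duplicates collapse.
// Illustrative only; the real checks live in the operation implementation.
std::set<int32_t> NormalizeAxes(const std::vector<int32_t>& axes, int32_t rank) {
    std::set<int32_t> result;
    for (int32_t axis : axes) {
        result.insert(axis < 0 ? axis + rank : axis);
    }
    return result;
}

// NormalizeAxes({1, 0, -3, -3}, 3) yields {0, 1}: the {2, 3, 2} input
// collapses over its first two dimensions, leaving an output of shape {2}.
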
diff --git a/runtime/test/generated/models/reduce_any.model.cpp b/runtime/test/generated/models/reduce_any.model.cpp
new file mode 100644
index 0000000..e08536c
--- /dev/null
+++ b/runtime/test/generated/models/reduce_any.model.cpp
@@ -0,0 +1,85 @@
+// clang-format off
+// Generated file (from: reduce_any.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_BOOL8, {1});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type1);
+  auto param1 = model->addOperand(&type2);
+  auto output0 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {true};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_ANY, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type2(Type::BOOL, {});
+  OperandType type3(Type::TENSOR_BOOL8, {2, 3, 2});
+  OperandType type4(Type::TENSOR_BOOL8, {2});
+  OperandType type5(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type3);
+  auto param2 = model->addOperand(&type5);
+  auto param3 = model->addOperand(&type2);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 4);
+  static bool8 param3_init[] = {false};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_ANY, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type2(Type::BOOL, {});
+  OperandType type3(Type::TENSOR_BOOL8, {2, 3, 2});
+  OperandType type6(Type::TENSOR_BOOL8, {1, 3, 1});
+  OperandType type7(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type3);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type2);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {0, 2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 2);
+  static bool8 param5_init[] = {true};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_ANY, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/models/reduce_max.model.cpp b/runtime/test/generated/models/reduce_max.model.cpp
new file mode 100644
index 0000000..d43189a
--- /dev/null
+++ b/runtime/test/generated/models/reduce_max.model.cpp
@@ -0,0 +1,454 @@
+// clang-format off
+// Generated file (from: reduce_max.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
+  OperandType type11(Type::TENSOR_FLOAT16, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type10);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {3, 2}, 0.5f, 127);
+  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type12);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type13);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {
+  OperandType type14(Type::TENSOR_FLOAT16, {1});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type14);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_2(Model *model) {
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1}, 0.5f, 127);
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type15);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_3(Model *model) {
+  OperandType type16(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type17(Type::TENSOR_FLOAT16, {2});
+  OperandType type3(Type::BOOL, {});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type16);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type17);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_3(Model *model) {
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 127);
+  OperandType type3(Type::BOOL, {});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type19);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_4(Model *model) {
+  OperandType type16(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type20(Type::TENSOR_FLOAT16, {1, 3, 1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type16);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type20);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_4(Model *model) {
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
+  OperandType type21(Type::TENSOR_QUANT8_ASYMM, {1, 3, 1}, 0.5f, 127);
+  OperandType type3(Type::BOOL, {});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type18);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type21);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
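
Note that the quant8 models above reuse one quantization (scale 0.5f, zero point 127) for both input and output. For REDUCE_MAX, and for REDUCE_MIN below, that is sound: with a positive scale the mapping real = scale * (q - zeroPoint) is monotone, so the maximum over quantized values picks out exactly the element whose real value is largest, and no requantization is needed. A minimal sketch under that assumption:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// With input and output sharing (scale, zeroPoint) and scale > 0, reducing
// quantized values directly is equivalent to reducing the real values.
// Illustrative helper; assumes a non-empty input.
uint8_t ReduceMaxQuant8(const std::vector<uint8_t>& values) {
    assert(!values.empty());
    return *std::max_element(values.begin(), values.end());
}

// e.g. ReduceMaxQuant8({121, 131, 125}) == 131, i.e. real value 2.0f
// under real = 0.5f * (q - 127), matching the shared quantization above.
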
diff --git a/runtime/test/generated/models/reduce_min.model.cpp b/runtime/test/generated/models/reduce_min.model.cpp
new file mode 100644
index 0000000..0490599
--- /dev/null
+++ b/runtime/test/generated/models/reduce_min.model.cpp
@@ -0,0 +1,454 @@
+// clang-format off
+// Generated file (from: reduce_min.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
+  OperandType type11(Type::TENSOR_FLOAT16, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type10);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8(Model *model) {
+  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {3, 2}, 0.5f, 127);
+  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type12);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type13);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {
+  OperandType type14(Type::TENSOR_FLOAT16, {1});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type14);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_2(Model *model) {
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1}, 0.5f, 127);
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type15);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_3(Model *model) {
+  OperandType type16(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type17(Type::TENSOR_FLOAT16, {2});
+  OperandType type3(Type::BOOL, {});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type16);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type17);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_3(Model *model) {
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 127);
+  OperandType type3(Type::BOOL, {});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type19);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_4(Model *model) {
+  OperandType type16(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type20(Type::TENSOR_FLOAT16, {1, 3, 1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type16);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type20);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_quant8_4(Model *model) {
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
+  OperandType type21(Type::TENSOR_QUANT8_ASYMM, {1, 3, 1}, 0.5f, 127);
+  OperandType type3(Type::BOOL, {});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type18);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type21);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_MIN, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_quant8_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/models/reduce_prod.model.cpp b/runtime/test/generated/models/reduce_prod.model.cpp
new file mode 100644
index 0000000..891a8e7
--- /dev/null
+++ b/runtime/test/generated/models/reduce_prod.model.cpp
@@ -0,0 +1,343 @@
+// clang-format off
+// Generated file (from: reduce_prod.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
+  OperandType type11(Type::TENSOR_FLOAT16, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type10);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {
+  OperandType type12(Type::TENSOR_FLOAT16, {1});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type12);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type12);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_3(Model *model) {
+  OperandType type13(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type14(Type::TENSOR_FLOAT16, {2});
+  OperandType type3(Type::BOOL, {});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type13);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_4(Model *model) {
+  OperandType type13(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type15(Type::TENSOR_FLOAT16, {1, 3, 1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type13);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
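[Editor's note] Each constructor above builds the same three-input graph — a data tensor, a TENSOR_INT32 axes tensor, and a scalar BOOL keep_dims — and wires them into a single REDUCE_PROD operation, so the output shapes they declare follow mechanically from those two parameters: negative axes wrap by the input rank and duplicates collapse, which is why axes {1, 0, -3, -3} over a {4, 3, 2} input reduce dimensions 0 and 1 only. Below is a minimal sketch of that rule; reducedShape is a hypothetical helper written for illustration, not part of the generated sources.

// Sketch (illustration only): how the declared output shapes in the
// generated constructors follow from the axes and keep_dims operands.
#include <cstdint>
#include <set>
#include <vector>

std::vector<uint32_t> reducedShape(const std::vector<uint32_t>& inputDims,
                                   const std::vector<int32_t>& axes,
                                   bool keepDims) {
    const int32_t rank = static_cast<int32_t>(inputDims.size());
    std::set<int32_t> reduced;
    for (int32_t axis : axes) {
        reduced.insert(axis < 0 ? axis + rank : axis);  // wrap negative axes; duplicates collapse
    }
    std::vector<uint32_t> out;
    for (int32_t d = 0; d < rank; ++d) {
        if (reduced.count(d) == 0) {
            out.push_back(inputDims[d]);  // dimension survives untouched
        } else if (keepDims) {
            out.push_back(1);             // reduced but kept as a size-1 dimension
        }                                 // else dropped entirely
    }
    return out;
}
// Checks against the constructors above:
//   reducedShape({4, 3, 2}, {1, 0, -3, -3}, false) == {2}        (CreateModel_3)
//   reducedShape({4, 3, 2}, {0, 2}, true)          == {1, 3, 1}  (CreateModel_4)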
diff --git a/runtime/test/generated/models/reduce_sum.model.cpp b/runtime/test/generated/models/reduce_sum.model.cpp
new file mode 100644
index 0000000..978aae0
--- /dev/null
+++ b/runtime/test/generated/models/reduce_sum.model.cpp
@@ -0,0 +1,343 @@
+// clang-format off
+// Generated file (from: reduce_sum.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type0);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
+  OperandType type11(Type::TENSOR_FLOAT16, {3});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input0 = model->addOperand(&type10);
+  auto param = model->addOperand(&type2);
+  auto param1 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param_init[] = {-1};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static bool8 param1_init[] = {false};
+  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input0, param, param1}, {output0});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input0},
+    {output0});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type4(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type4);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type4);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {
+  OperandType type12(Type::TENSOR_FLOAT16, {1});
+  OperandType type2(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::BOOL, {});
+  // Phase 1, operands
+  auto input01 = model->addOperand(&type12);
+  auto param2 = model->addOperand(&type2);
+  auto param3 = model->addOperand(&type3);
+  auto output01 = model->addOperand(&type12);
+  // Phase 2, operations
+  static int32_t param2_init[] = {0};
+  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
+  static bool8 param3_init[] = {true};
+  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input01, param2, param3}, {output01});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input01},
+    {output01});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_3(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type6(Type::TENSOR_FLOAT32, {2});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type5);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type6);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_3(Model *model) {
+  OperandType type13(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type14(Type::TENSOR_FLOAT16, {2});
+  OperandType type3(Type::BOOL, {});
+  OperandType type7(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input02 = model->addOperand(&type13);
+  auto param4 = model->addOperand(&type7);
+  auto param5 = model->addOperand(&type3);
+  auto output02 = model->addOperand(&type14);
+  // Phase 2, operations
+  static int32_t param4_init[] = {1, 0, -3, -3};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
+  static bool8 param5_init[] = {false};
+  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input02, param4, param5}, {output02});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input02},
+    {output02});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_4(Model *model) {
+  OperandType type3(Type::BOOL, {});
+  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type5);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type8);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_4(Model *model) {
+  OperandType type13(Type::TENSOR_FLOAT16, {4, 3, 2});
+  OperandType type15(Type::TENSOR_FLOAT16, {1, 3, 1});
+  OperandType type3(Type::BOOL, {});
+  OperandType type9(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input03 = model->addOperand(&type13);
+  auto param6 = model->addOperand(&type9);
+  auto param7 = model->addOperand(&type3);
+  auto output03 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t param6_init[] = {0, 2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
+  static bool8 param7_init[] = {true};
+  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
+  model->addOperation(ANEURALNETWORKS_REDUCE_SUM, {input03, param6, param7}, {output03});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input03},
+    {output03});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/tests/reduce_all.mod.py.cpp b/runtime/test/generated/tests/reduce_all.mod.py.cpp
new file mode 100644
index 0000000..df54dbe
--- /dev/null
+++ b/runtime/test/generated/tests/reduce_all.mod.py.cpp
@@ -0,0 +1,29 @@
+// clang-format off
+// Generated file (from: reduce_all.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reduce_all {
+// Generated reduce_all test
+#include "generated/examples/reduce_all.example.cpp"
+// Generated model constructor
+#include "generated/models/reduce_all.model.cpp"
+} // namespace reduce_all
+
+TEST_F(GeneratedTests, reduce_all) {
+    execute(reduce_all::CreateModel,
+            reduce_all::is_ignored,
+            reduce_all::get_examples());
+}
+
+TEST_F(GeneratedTests, reduce_all_2) {
+    execute(reduce_all::CreateModel_2,
+            reduce_all::is_ignored_2,
+            reduce_all::get_examples_2());
+}
+
+TEST_F(GeneratedTests, reduce_all_3) {
+    execute(reduce_all::CreateModel_3,
+            reduce_all::is_ignored_3,
+            reduce_all::get_examples_3());
+}
+
diff --git a/runtime/test/generated/tests/reduce_any.mod.py.cpp b/runtime/test/generated/tests/reduce_any.mod.py.cpp
new file mode 100644
index 0000000..2be3ee6
--- /dev/null
+++ b/runtime/test/generated/tests/reduce_any.mod.py.cpp
@@ -0,0 +1,29 @@
+// clang-format off
+// Generated file (from: reduce_any.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reduce_any {
+// Generated reduce_any test
+#include "generated/examples/reduce_any.example.cpp"
+// Generated model constructor
+#include "generated/models/reduce_any.model.cpp"
+} // namespace reduce_any
+
+TEST_F(GeneratedTests, reduce_any) {
+    execute(reduce_any::CreateModel,
+            reduce_any::is_ignored,
+            reduce_any::get_examples());
+}
+
+TEST_F(GeneratedTests, reduce_any_2) {
+    execute(reduce_any::CreateModel_2,
+            reduce_any::is_ignored_2,
+            reduce_any::get_examples_2());
+}
+
+TEST_F(GeneratedTests, reduce_any_3) {
+    execute(reduce_any::CreateModel_3,
+            reduce_any::is_ignored_3,
+            reduce_any::get_examples_3());
+}
+
diff --git a/runtime/test/generated/tests/reduce_max.mod.py.cpp b/runtime/test/generated/tests/reduce_max.mod.py.cpp
new file mode 100644
index 0000000..52c3ee5
--- /dev/null
+++ b/runtime/test/generated/tests/reduce_max.mod.py.cpp
@@ -0,0 +1,107 @@
+// clang-format off
+// Generated file (from: reduce_max.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reduce_max {
+// Generated reduce_max test
+#include "generated/examples/reduce_max.example.cpp"
+// Generated model constructor
+#include "generated/models/reduce_max.model.cpp"
+} // namespace reduce_max
+
+TEST_F(GeneratedTests, reduce_max) {
+    execute(reduce_max::CreateModel,
+            reduce_max::is_ignored,
+            reduce_max::get_examples());
+}
+
+TEST_F(GeneratedTests, reduce_max_relaxed) {
+    execute(reduce_max::CreateModel_relaxed,
+            reduce_max::is_ignored_relaxed,
+            reduce_max::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, reduce_max_float16) {
+    execute(reduce_max::CreateModel_float16,
+            reduce_max::is_ignored_float16,
+            reduce_max::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, reduce_max_quant8) {
+    execute(reduce_max::CreateModel_quant8,
+            reduce_max::is_ignored_quant8,
+            reduce_max::get_examples_quant8());
+}
+
+TEST_F(GeneratedTests, reduce_max_2) {
+    execute(reduce_max::CreateModel_2,
+            reduce_max::is_ignored_2,
+            reduce_max::get_examples_2());
+}
+
+TEST_F(GeneratedTests, reduce_max_relaxed_2) {
+    execute(reduce_max::CreateModel_relaxed_2,
+            reduce_max::is_ignored_relaxed_2,
+            reduce_max::get_examples_relaxed_2());
+}
+
+TEST_F(GeneratedTests, reduce_max_float16_2) {
+    execute(reduce_max::CreateModel_float16_2,
+            reduce_max::is_ignored_float16_2,
+            reduce_max::get_examples_float16_2());
+}
+
+TEST_F(GeneratedTests, reduce_max_quant8_2) {
+    execute(reduce_max::CreateModel_quant8_2,
+            reduce_max::is_ignored_quant8_2,
+            reduce_max::get_examples_quant8_2());
+}
+
+TEST_F(GeneratedTests, reduce_max_3) {
+    execute(reduce_max::CreateModel_3,
+            reduce_max::is_ignored_3,
+            reduce_max::get_examples_3());
+}
+
+TEST_F(GeneratedTests, reduce_max_relaxed_3) {
+    execute(reduce_max::CreateModel_relaxed_3,
+            reduce_max::is_ignored_relaxed_3,
+            reduce_max::get_examples_relaxed_3());
+}
+
+TEST_F(GeneratedTests, reduce_max_float16_3) {
+    execute(reduce_max::CreateModel_float16_3,
+            reduce_max::is_ignored_float16_3,
+            reduce_max::get_examples_float16_3());
+}
+
+TEST_F(GeneratedTests, reduce_max_quant8_3) {
+    execute(reduce_max::CreateModel_quant8_3,
+            reduce_max::is_ignored_quant8_3,
+            reduce_max::get_examples_quant8_3());
+}
+
+TEST_F(GeneratedTests, reduce_max_4) {
+    execute(reduce_max::CreateModel_4,
+            reduce_max::is_ignored_4,
+            reduce_max::get_examples_4());
+}
+
+TEST_F(GeneratedTests, reduce_max_relaxed_4) {
+    execute(reduce_max::CreateModel_relaxed_4,
+            reduce_max::is_ignored_relaxed_4,
+            reduce_max::get_examples_relaxed_4());
+}
+
+TEST_F(GeneratedTests, reduce_max_float16_4) {
+    execute(reduce_max::CreateModel_float16_4,
+            reduce_max::is_ignored_float16_4,
+            reduce_max::get_examples_float16_4());
+}
+
+TEST_F(GeneratedTests, reduce_max_quant8_4) {
+    execute(reduce_max::CreateModel_quant8_4,
+            reduce_max::is_ignored_quant8_4,
+            reduce_max::get_examples_quant8_4());
+}
+
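[Editor's note] The reduce_max suite above (and reduce_min, next) additionally carries quant8 test variants, which reduce_prod and reduce_sum omit; the corresponding VTS models further below declare those operands as TENSOR_QUANT8_ASYMM with scale 0.5 and zeroPoint 127. For orientation, here is a sketch of the standard asymmetric dequantization those parameters imply — illustrative only, not harness code.

#include <cstdint>

// Real value represented by a stored quantized byte q:
//   real = (q - zeroPoint) * scale
float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
    return static_cast<float>(static_cast<int32_t>(q) - zeroPoint) * scale;
}
// With the test parameters: dequantize(127, 0.5f, 127) == 0.0f,
//                           dequantize(129, 0.5f, 127) == 1.0f.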
diff --git a/runtime/test/generated/tests/reduce_min.mod.py.cpp b/runtime/test/generated/tests/reduce_min.mod.py.cpp
new file mode 100644
index 0000000..f23f9fc
--- /dev/null
+++ b/runtime/test/generated/tests/reduce_min.mod.py.cpp
@@ -0,0 +1,107 @@
+// clang-format off
+// Generated file (from: reduce_min.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reduce_min {
+// Generated reduce_min test
+#include "generated/examples/reduce_min.example.cpp"
+// Generated model constructor
+#include "generated/models/reduce_min.model.cpp"
+} // namespace reduce_min
+
+TEST_F(GeneratedTests, reduce_min) {
+    execute(reduce_min::CreateModel,
+            reduce_min::is_ignored,
+            reduce_min::get_examples());
+}
+
+TEST_F(GeneratedTests, reduce_min_relaxed) {
+    execute(reduce_min::CreateModel_relaxed,
+            reduce_min::is_ignored_relaxed,
+            reduce_min::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, reduce_min_float16) {
+    execute(reduce_min::CreateModel_float16,
+            reduce_min::is_ignored_float16,
+            reduce_min::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, reduce_min_quant8) {
+    execute(reduce_min::CreateModel_quant8,
+            reduce_min::is_ignored_quant8,
+            reduce_min::get_examples_quant8());
+}
+
+TEST_F(GeneratedTests, reduce_min_2) {
+    execute(reduce_min::CreateModel_2,
+            reduce_min::is_ignored_2,
+            reduce_min::get_examples_2());
+}
+
+TEST_F(GeneratedTests, reduce_min_relaxed_2) {
+    execute(reduce_min::CreateModel_relaxed_2,
+            reduce_min::is_ignored_relaxed_2,
+            reduce_min::get_examples_relaxed_2());
+}
+
+TEST_F(GeneratedTests, reduce_min_float16_2) {
+    execute(reduce_min::CreateModel_float16_2,
+            reduce_min::is_ignored_float16_2,
+            reduce_min::get_examples_float16_2());
+}
+
+TEST_F(GeneratedTests, reduce_min_quant8_2) {
+    execute(reduce_min::CreateModel_quant8_2,
+            reduce_min::is_ignored_quant8_2,
+            reduce_min::get_examples_quant8_2());
+}
+
+TEST_F(GeneratedTests, reduce_min_3) {
+    execute(reduce_min::CreateModel_3,
+            reduce_min::is_ignored_3,
+            reduce_min::get_examples_3());
+}
+
+TEST_F(GeneratedTests, reduce_min_relaxed_3) {
+    execute(reduce_min::CreateModel_relaxed_3,
+            reduce_min::is_ignored_relaxed_3,
+            reduce_min::get_examples_relaxed_3());
+}
+
+TEST_F(GeneratedTests, reduce_min_float16_3) {
+    execute(reduce_min::CreateModel_float16_3,
+            reduce_min::is_ignored_float16_3,
+            reduce_min::get_examples_float16_3());
+}
+
+TEST_F(GeneratedTests, reduce_min_quant8_3) {
+    execute(reduce_min::CreateModel_quant8_3,
+            reduce_min::is_ignored_quant8_3,
+            reduce_min::get_examples_quant8_3());
+}
+
+TEST_F(GeneratedTests, reduce_min_4) {
+    execute(reduce_min::CreateModel_4,
+            reduce_min::is_ignored_4,
+            reduce_min::get_examples_4());
+}
+
+TEST_F(GeneratedTests, reduce_min_relaxed_4) {
+    execute(reduce_min::CreateModel_relaxed_4,
+            reduce_min::is_ignored_relaxed_4,
+            reduce_min::get_examples_relaxed_4());
+}
+
+TEST_F(GeneratedTests, reduce_min_float16_4) {
+    execute(reduce_min::CreateModel_float16_4,
+            reduce_min::is_ignored_float16_4,
+            reduce_min::get_examples_float16_4());
+}
+
+TEST_F(GeneratedTests, reduce_min_quant8_4) {
+    execute(reduce_min::CreateModel_quant8_4,
+            reduce_min::is_ignored_quant8_4,
+            reduce_min::get_examples_quant8_4());
+}
+
diff --git a/runtime/test/generated/tests/reduce_prod.mod.py.cpp b/runtime/test/generated/tests/reduce_prod.mod.py.cpp
new file mode 100644
index 0000000..ddce8c8
--- /dev/null
+++ b/runtime/test/generated/tests/reduce_prod.mod.py.cpp
@@ -0,0 +1,83 @@
+// clang-format off
+// Generated file (from: reduce_prod.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reduce_prod {
+// Generated reduce_prod test
+#include "generated/examples/reduce_prod.example.cpp"
+// Generated model constructor
+#include "generated/models/reduce_prod.model.cpp"
+} // namespace reduce_prod
+
+TEST_F(GeneratedTests, reduce_prod) {
+    execute(reduce_prod::CreateModel,
+            reduce_prod::is_ignored,
+            reduce_prod::get_examples());
+}
+
+TEST_F(GeneratedTests, reduce_prod_relaxed) {
+    execute(reduce_prod::CreateModel_relaxed,
+            reduce_prod::is_ignored_relaxed,
+            reduce_prod::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, reduce_prod_float16) {
+    execute(reduce_prod::CreateModel_float16,
+            reduce_prod::is_ignored_float16,
+            reduce_prod::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, reduce_prod_2) {
+    execute(reduce_prod::CreateModel_2,
+            reduce_prod::is_ignored_2,
+            reduce_prod::get_examples_2());
+}
+
+TEST_F(GeneratedTests, reduce_prod_relaxed_2) {
+    execute(reduce_prod::CreateModel_relaxed_2,
+            reduce_prod::is_ignored_relaxed_2,
+            reduce_prod::get_examples_relaxed_2());
+}
+
+TEST_F(GeneratedTests, reduce_prod_float16_2) {
+    execute(reduce_prod::CreateModel_float16_2,
+            reduce_prod::is_ignored_float16_2,
+            reduce_prod::get_examples_float16_2());
+}
+
+TEST_F(GeneratedTests, reduce_prod_3) {
+    execute(reduce_prod::CreateModel_3,
+            reduce_prod::is_ignored_3,
+            reduce_prod::get_examples_3());
+}
+
+TEST_F(GeneratedTests, reduce_prod_relaxed_3) {
+    execute(reduce_prod::CreateModel_relaxed_3,
+            reduce_prod::is_ignored_relaxed_3,
+            reduce_prod::get_examples_relaxed_3());
+}
+
+TEST_F(GeneratedTests, reduce_prod_float16_3) {
+    execute(reduce_prod::CreateModel_float16_3,
+            reduce_prod::is_ignored_float16_3,
+            reduce_prod::get_examples_float16_3());
+}
+
+TEST_F(GeneratedTests, reduce_prod_4) {
+    execute(reduce_prod::CreateModel_4,
+            reduce_prod::is_ignored_4,
+            reduce_prod::get_examples_4());
+}
+
+TEST_F(GeneratedTests, reduce_prod_relaxed_4) {
+    execute(reduce_prod::CreateModel_relaxed_4,
+            reduce_prod::is_ignored_relaxed_4,
+            reduce_prod::get_examples_relaxed_4());
+}
+
+TEST_F(GeneratedTests, reduce_prod_float16_4) {
+    execute(reduce_prod::CreateModel_float16_4,
+            reduce_prod::is_ignored_float16_4,
+            reduce_prod::get_examples_float16_4());
+}
+
diff --git a/runtime/test/generated/tests/reduce_sum.mod.py.cpp b/runtime/test/generated/tests/reduce_sum.mod.py.cpp
new file mode 100644
index 0000000..4632df8
--- /dev/null
+++ b/runtime/test/generated/tests/reduce_sum.mod.py.cpp
@@ -0,0 +1,83 @@
+// clang-format off
+// Generated file (from: reduce_sum.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace reduce_sum {
+// Generated reduce_sum test
+#include "generated/examples/reduce_sum.example.cpp"
+// Generated model constructor
+#include "generated/models/reduce_sum.model.cpp"
+} // namespace reduce_sum
+
+TEST_F(GeneratedTests, reduce_sum) {
+    execute(reduce_sum::CreateModel,
+            reduce_sum::is_ignored,
+            reduce_sum::get_examples());
+}
+
+TEST_F(GeneratedTests, reduce_sum_relaxed) {
+    execute(reduce_sum::CreateModel_relaxed,
+            reduce_sum::is_ignored_relaxed,
+            reduce_sum::get_examples_relaxed());
+}
+
+TEST_F(GeneratedTests, reduce_sum_float16) {
+    execute(reduce_sum::CreateModel_float16,
+            reduce_sum::is_ignored_float16,
+            reduce_sum::get_examples_float16());
+}
+
+TEST_F(GeneratedTests, reduce_sum_2) {
+    execute(reduce_sum::CreateModel_2,
+            reduce_sum::is_ignored_2,
+            reduce_sum::get_examples_2());
+}
+
+TEST_F(GeneratedTests, reduce_sum_relaxed_2) {
+    execute(reduce_sum::CreateModel_relaxed_2,
+            reduce_sum::is_ignored_relaxed_2,
+            reduce_sum::get_examples_relaxed_2());
+}
+
+TEST_F(GeneratedTests, reduce_sum_float16_2) {
+    execute(reduce_sum::CreateModel_float16_2,
+            reduce_sum::is_ignored_float16_2,
+            reduce_sum::get_examples_float16_2());
+}
+
+TEST_F(GeneratedTests, reduce_sum_3) {
+    execute(reduce_sum::CreateModel_3,
+            reduce_sum::is_ignored_3,
+            reduce_sum::get_examples_3());
+}
+
+TEST_F(GeneratedTests, reduce_sum_relaxed_3) {
+    execute(reduce_sum::CreateModel_relaxed_3,
+            reduce_sum::is_ignored_relaxed_3,
+            reduce_sum::get_examples_relaxed_3());
+}
+
+TEST_F(GeneratedTests, reduce_sum_float16_3) {
+    execute(reduce_sum::CreateModel_float16_3,
+            reduce_sum::is_ignored_float16_3,
+            reduce_sum::get_examples_float16_3());
+}
+
+TEST_F(GeneratedTests, reduce_sum_4) {
+    execute(reduce_sum::CreateModel_4,
+            reduce_sum::is_ignored_4,
+            reduce_sum::get_examples_4());
+}
+
+TEST_F(GeneratedTests, reduce_sum_relaxed_4) {
+    execute(reduce_sum::CreateModel_relaxed_4,
+            reduce_sum::is_ignored_relaxed_4,
+            reduce_sum::get_examples_relaxed_4());
+}
+
+TEST_F(GeneratedTests, reduce_sum_float16_4) {
+    execute(reduce_sum::CreateModel_float16_4,
+            reduce_sum::is_ignored_float16_4,
+            reduce_sum::get_examples_float16_4());
+}
+
diff --git a/runtime/test/generated/vts_models/reduce_all.model.cpp b/runtime/test/generated/vts_models/reduce_all.model.cpp
new file mode 100644
index 0000000..eff6907
--- /dev/null
+++ b/runtime/test/generated/vts_models/reduce_all.model.cpp
@@ -0,0 +1,215 @@
+// clang-format off
+// Generated file (from: reduce_all.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_ALL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_ALL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_ALL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
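[Editor's note] In these VTS models, constant operands with lifetime CONSTANT_COPY are serialized into the single operandValues blob at the offsets given by their .location fields: each int32 axis occupies four little-endian bytes and the BOOL keep_dims occupies one trailing byte, so the run 253, 255, 255, 255 above is int32_t -3. A short sketch of that packing follows; packReduceParams is an illustrative helper, not generator code, and it assumes a little-endian host, as on the Android targets these tests run on.

#include <cstdint>
#include <cstring>
#include <vector>

void appendInt32LE(std::vector<uint8_t>* blob, int32_t value) {
    uint8_t bytes[sizeof(value)];
    std::memcpy(bytes, &value, sizeof(value));  // matches host byte order; little-endian assumed
    blob->insert(blob->end(), bytes, bytes + sizeof(value));
}

std::vector<uint8_t> packReduceParams(const std::vector<int32_t>& axes,
                                      bool keepDims) {
    std::vector<uint8_t> blob;
    for (int32_t axis : axes) appendInt32LE(&blob, axis);  // axes tensor, offset 0
    blob.push_back(keepDims ? 1 : 0);                      // bool8 flag, one byte
    return blob;
}
// packReduceParams({1, 0, -3, -3}, false) reproduces the createTestModel_2
// blob above: 1,0,0,0, 0,0,0,0, 253,255,255,255, 253,255,255,255, 0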
diff --git a/runtime/test/generated/vts_models/reduce_any.model.cpp b/runtime/test/generated/vts_models/reduce_any.model.cpp
new file mode 100644
index 0000000..0c48ce4
--- /dev/null
+++ b/runtime/test/generated/vts_models/reduce_any.model.cpp
@@ -0,0 +1,215 @@
+// clang-format off
+// Generated file (from: reduce_any.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_ANY,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_ANY,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {2, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_BOOL8,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_ANY,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
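[Editor's note] The float-typed VTS models that follow (reduce_max, reduce_min) also emit *_relaxed variants, distinguished only by the .relaxComputationFloat32toFloat16 = true field: a driver may then evaluate TENSOR_FLOAT32 operands at float16 range and precision, so expected outputs must be checked against a correspondingly wider tolerance. The sketch below is a rough illustration of that idea only — the multiplier and exact form are assumptions, not the harness's actual comparator.

#include <cmath>

// Hypothetical accuracy check: widen the tolerance from float32 unit
// roundoff (~2^-23) to float16 unit roundoff (~2^-10) in relaxed mode.
bool closeEnough(float expected, float actual, bool relaxed) {
    const float unit = relaxed ? 9.77e-4f : 1.19e-7f;
    const float tolerance = 5.0f * unit * (std::fabs(expected) + 1.0f);  // 5x is an arbitrary margin
    return std::fabs(expected - actual) <= tolerance;
}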
diff --git a/runtime/test/generated/vts_models/reduce_max.model.cpp b/runtime/test/generated/vts_models/reduce_max.model.cpp
new file mode 100644
index 0000000..99c8d57
--- /dev/null
+++ b/runtime/test/generated/vts_models/reduce_max.model.cpp
@@ -0,0 +1,1142 @@
+// clang-format off
+// Generated file (from: reduce_max.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
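+// The quant8 variants use TENSOR_QUANT8_ASYMM operands; with scale = 0.5 and
+// zeroPoint = 127, quantized values map to real values as real = 0.5 * (q - 127).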
+Model createTestModel_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
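+    // These bytes decode (little-endian) to axes = {1, 0, -3, -3} and
+    // keep_dims = false; negative axes count back from the last dimension,
+    // as is conventional for reduce ops.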
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
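+    // These bytes decode (little-endian) to axes = {0, 2} and keep_dims = true,
+    // which reduces the {4, 3, 2} input to the {1, 3, 1} output declared above.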
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MAX,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/vts_models/reduce_min.model.cpp b/runtime/test/generated/vts_models/reduce_min.model.cpp
new file mode 100644
index 0000000..c0a9234
--- /dev/null
+++ b/runtime/test/generated/vts_models/reduce_min.model.cpp
@@ -0,0 +1,1142 @@
+// clang-format off
+// Generated file (from: reduce_min.mod.py). Do not edit
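+// Each model below follows the same four-operand pattern: a data tensor
+// (MODEL_INPUT), an axes vector (TENSOR_INT32) and a keep_dims scalar (BOOL),
+// both supplied as CONSTANT_COPY bytes packed into operandValues, and the
+// reduced output tensor (MODEL_OUTPUT). The _relaxed, _float16, and _quant8
+// variants re-run the same graphs with relaxComputationFloat32toFloat16,
+// TENSOR_FLOAT16, and TENSOR_QUANT8_ASYMM operands, respectively.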
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
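+    // These bytes decode (little-endian) to axes = {-1} and keep_dims = false,
+    // reducing the {3, 2} input along its last dimension to the {3} output.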
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_quant8_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 127,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_MIN,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_quant8_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
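Reviewer note: the operandValues blobs in these generated models are the raw little-endian bytes of the CONSTANT_COPY inputs. A minimal sketch of how the `0, 0, 0, 0, 2, 0, 0, 0, 1` blob shared by the *_4 variants above decodes (plain Python for illustration; the decode_reduce_operands helper is hypothetical and not part of this change):

    # Hypothetical decoder for the *_4 blob: two little-endian int32 axes
    # followed by one BOOL byte holding keep_dims.
    import struct

    def decode_reduce_operands(blob, num_axes):
        axes = list(struct.unpack_from('<%di' % num_axes, bytes(blob), 0))
        keep_dims = bool(blob[4 * num_axes])
        return axes, keep_dims

    axes, keep_dims = decode_reduce_operands([0, 0, 0, 0, 2, 0, 0, 0, 1], 2)
    assert axes == [0, 2] and keep_dims is True  # reduce axes 0 and 2, keep dims

With axes {0, 2} reduced and keep_dims set, the {4, 3, 2} input collapses to the {1, 3, 1} output operand declared above.
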
diff --git a/runtime/test/generated/vts_models/reduce_prod.model.cpp b/runtime/test/generated/vts_models/reduce_prod.model.cpp
new file mode 100644
index 0000000..b597381
--- /dev/null
+++ b/runtime/test/generated/vts_models/reduce_prod.model.cpp
@@ -0,0 +1,858 @@
+// clang-format off
+// Generated file (from: reduce_prod.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_PROD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
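Reviewer note: the first three reduce_prod variants above pass a single axis of -1 (bytes 255, 255, 255, 255) with keep_dims = 0, so the {3, 2} input reduces over its innermost dimension to the {3} output operand. A reference sketch under stated assumptions (NumPy stands in for the CPU kernel; the input values here are illustrative, not the generated example data):

    import numpy as np

    # Illustrative {3, 2} input; axis -1 multiplies across each row.
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    out = np.prod(x, axis=-1, keepdims=False)   # shape {3, 2} -> {3}
    assert out.tolist() == [2.0, 12.0, 30.0]
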
diff --git a/runtime/test/generated/vts_models/reduce_sum.model.cpp b/runtime/test/generated/vts_models/reduce_sum.model.cpp
new file mode 100644
index 0000000..1fb541b
--- /dev/null
+++ b/runtime/test/generated/vts_models/reduce_sum.model.cpp
@@ -0,0 +1,858 @@
+// clang-format off
+// Generated file (from: reduce_sum.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      255, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_3() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 16},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 16, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      1, 0, 0, 0, 0, 0, 0, 0, 253, 255, 255, 255, 253, 255, 255, 255, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_relaxed_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 3, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::REDUCE_SUM,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
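Reviewer note: the *_3 reduce_sum variants encode axes [1, 0, -3, -3] (each 253, 255, 255, 255 run is -3 little-endian). On a rank-3 input, -3 refers to the same dimension as 0 and duplicates are reduced only once, which is why the {4, 3, 2} input yields a {2} output. A reference sketch of that normalization (assumption: NumPy as a stand-in for the kernel; input values illustrative):

    import numpy as np

    def reduce_sum_ref(data, axes, keep_dims):
        # Negative axes count from the back; repeated axes are reduced once.
        normalized = sorted({a % data.ndim for a in axes})
        return np.sum(data, axis=tuple(normalized), keepdims=keep_dims)

    x = np.arange(24, dtype=np.float32).reshape(4, 3, 2)
    # Axes [1, 0, -3, -3] normalize to {0, 1}, matching createTestModel_3.
    assert reduce_sum_ref(x, [1, 0, -3, -3], False).shape == (2,)
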
diff --git a/runtime/test/specs/V1_2/reduce_all.mod.py b/runtime/test/specs/V1_2/reduce_all.mod.py
new file mode 100644
index 0000000..831a702
--- /dev/null
+++ b/runtime/test/specs/V1_2/reduce_all.mod.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+  model = Model().Operation("REDUCE_ALL", input0, axes, keep_dims).To(output0)
+  Example({
+      input0: input_data,
+      output0: output_data,
+  }, model=model)
+
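+# REDUCE_ALL computes the logical AND of the elements along the given
+# dimensions. Negative axis values count from the back, and an axis listed
+# more than once (as in axes=[1, 0, -3, -3] below) is reduced only once.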
+test(
+    input0=Input("input0", "TENSOR_BOOL8", "{1}"),
+    input_data=[False],
+    axes=[0],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+    output_data=[False],
+)
+
+test(
+    input0=Input("input0", "TENSOR_BOOL8", "{2, 3, 2}"),
+    input_data=[True, True, True, True, True, False,
+                True, True, True, True, True, True],
+    axes=[1, 0, -3, -3],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_BOOL8", "{2}"),
+    output_data=[True, False],
+)
+
+test(
+    input0=Input("input0", "TENSOR_BOOL8", "{2, 3, 2}"),
+    input_data=[True, True, True, True, True, True,
+                True, True, False, True, True, True],
+    axes=[0, 2],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_BOOL8", "{1, 3, 1}"),
+    output_data=[True, False, True],
+)
diff --git a/runtime/test/specs/V1_2/reduce_any.mod.py b/runtime/test/specs/V1_2/reduce_any.mod.py
new file mode 100644
index 0000000..da0caca
--- /dev/null
+++ b/runtime/test/specs/V1_2/reduce_any.mod.py
@@ -0,0 +1,55 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+  model = Model().Operation("REDUCE_ANY", input0, axes, keep_dims).To(output0)
+  Example({
+      input0: input_data,
+      output0: output_data,
+  }, model=model)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
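+# REDUCE_ANY computes the logical OR of the elements along the given
+# dimensions, so a single True anywhere in a reduced slice yields True.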
+test(
+    input0=Input("input0", "TENSOR_BOOL8", "{1}"),
+    input_data=[False],
+    axes=[0],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+    output_data=[False],
+)
+
+test(
+    input0=Input("input0", "TENSOR_BOOL8", "{2, 3, 2}"),
+    input_data=[False, False, False, False, False, False,
+                False, True,  False, False, False, True],
+    axes=[1, 0, -3, -3],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_BOOL8", "{2}"),
+    output_data=[False, True],
+)
+
+test(
+    input0=Input("input0", "TENSOR_BOOL8", "{2, 3, 2}"),
+    input_data=[False, False, False, False, False, False,
+                False, True,  False, False, False, True],
+    axes=[0, 2],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_BOOL8", "{1, 3, 1}"),
+    output_data=[True, False, True],
+)
diff --git a/runtime/test/specs/V1_2/reduce_max.mod.py b/runtime/test/specs/V1_2/reduce_max.mod.py
new file mode 100644
index 0000000..f08041d
--- /dev/null
+++ b/runtime/test/specs/V1_2/reduce_max.mod.py
@@ -0,0 +1,73 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+  model = Model().Operation("REDUCE_MAX", input0, axes, keep_dims).To(output0)
+  quant8 = DataTypeConverter().Identify({
+      input0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+      output0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+  })
+  Example({
+      input0: input_data,
+      output0: output_data,
+  }, model=model).AddVariations("relaxed", "float16", quant8)
+
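+# The quant8 variation quantizes each real value v to round(v / 0.5) + 127,
+# so with scale 0.5 and zeroPoint 127 the representable real range is
+# [-63.5, 64.0], which covers all of the data below.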
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+    input_data=[-1, -2,
+                3, 4,
+                5, -6],
+    axes=[-1],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+    output_data=[-1, 4, 5],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+    input_data=[9.527],
+    axes=[0],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+    output_data=[9.527],
+)
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+                0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+                1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+    axes=[1, 0, -3, -3],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+    output_data=[2.3, 2.4],
+)
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+                0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+                1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+    axes=[0, 2],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+    output_data=[2.0, 2.2, 2.4],
+)
diff --git a/runtime/test/specs/V1_2/reduce_min.mod.py b/runtime/test/specs/V1_2/reduce_min.mod.py
new file mode 100644
index 0000000..57b8279
--- /dev/null
+++ b/runtime/test/specs/V1_2/reduce_min.mod.py
@@ -0,0 +1,72 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+  model = Model().Operation("REDUCE_MIN", input0, axes, keep_dims).To(output0)
+  quant8 = DataTypeConverter().Identify({
+      input0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+      output0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+  })
+  Example({
+      input0: input_data,
+      output0: output_data,
+  }, model=model).AddVariations("relaxed", "float16", quant8)
+
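+# As in reduce_max.mod.py, the quant8 variation uses scale 0.5 and
+# zeroPoint 127 (real value = 0.5 * (q - 127)).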
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+    input_data=[-1, -2,
+                3, 4,
+                5, -6],
+    axes=[-1],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+    output_data=[-2, 3, -6],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+    input_data=[9.527],
+    axes=[0],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+    output_data=[9.527],
+)
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+                0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+                1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+    axes=[1, 0, -3, -3],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+    output_data=[0.1, 0.2],
+)
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+                0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+                1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+    axes=[0, 2],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+    output_data=[0.1, 0.3, 0.5],
+)
diff --git a/runtime/test/specs/V1_2/reduce_prod.mod.py b/runtime/test/specs/V1_2/reduce_prod.mod.py
new file mode 100644
index 0000000..978f273
--- /dev/null
+++ b/runtime/test/specs/V1_2/reduce_prod.mod.py
@@ -0,0 +1,71 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+  model = Model().Operation("REDUCE_PROD", input0, axes, keep_dims).To(output0)
+  Example({
+      input0: input_data,
+      output0: output_data,
+  }, model=model).AddVariations("relaxed", "float16")
+
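+# Unlike REDUCE_MAX/REDUCE_MIN above, no quant8 variation is generated here:
+# REDUCE_PROD (like REDUCE_SUM below) is exercised only on float tensors.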
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+    input_data=[-1, -2,
+                3, 4,
+                5, -6],
+    axes=[-1],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+    output_data=[-1 * -2, 3 * 4, 5 * -6],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+    input_data=[9.527],
+    axes=[0],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+    output_data=[9.527],
+)
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                9.0,  1.00, 1.10, 1.20, 1.30, 1.40, 1.50, 1.60,
+                1.70, 1.80, 1.90, 2.00, 2.10, 2.20, 2.30, 2.40],
+    axes=[1, 0, -3, -3],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+    output_data=[3.16234143225e+4, 1.9619905536e+4],
+)
+
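+# With axes=[0, 2] each middle-dimension slice reduces to a single product,
+# e.g. 1.0 * 2.0 * 7.0 * 8.0 * 1.3 * 1.4 * 1.9 * 2.0 = 774.592 = 7.74592e+2
+# for the first slice.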
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                9.0,  1.00, 1.10, 1.20, 1.30, 1.40, 1.50, 1.60,
+                1.70, 1.80, 1.90, 2.00, 2.10, 2.20, 2.30, 2.40],
+    axes=[0, 2],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+    output_data=[7.74592e+2, 1.197504e+3, 6.6889152e+2],
+)
diff --git a/runtime/test/specs/V1_2/reduce_sum.mod.py b/runtime/test/specs/V1_2/reduce_sum.mod.py
new file mode 100644
index 0000000..c59579e
--- /dev/null
+++ b/runtime/test/specs/V1_2/reduce_sum.mod.py
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+  model = Model().Operation("REDUCE_SUM", input0, axes, keep_dims).To(output0)
+  Example({
+      input0: input_data,
+      output0: output_data,
+  }, model=model).AddVariations("relaxed", "float16")
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+    input_data=[-1, -2,
+                3, 4,
+                5, -6],
+    axes=[-1],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+    output_data=[-1 - 2, 3 + 4, 5 - 6],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+    input_data=[9.527],
+    axes=[0],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+    output_data=[9.527],
+)
+
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+                0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+                1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+    axes=[1, 0, -3, -3],
+    keep_dims=False,
+    output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+    output_data=[14.4, 15.6],
+)
+
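+# With axes=[0, 2] each middle-dimension slice sums eight elements, e.g.
+# 0.1 + 0.2 + 0.7 + 0.8 + 1.3 + 1.4 + 1.9 + 2.0 = 8.4 for the first slice.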
+test(
+    input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+    input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+                0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+                1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+    axes=[0, 2],
+    keep_dims=True,
+    output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+    output_data=[8.4, 10.0, 11.6],
+)