Support zero batch in broadcast ops.

Also switch to OperationResolver.

Bug: 126737477
Test: NeuralNetworksTest_static
Change-Id: Ia2aaf7db4539ce5ffb97eb2341b0b5a56b2b8483
Merged-In: Ia2aaf7db4539ce5ffb97eb2341b0b5a56b2b8483
(cherry picked from commit 041d28acbe75b80b5d55db5daea7303751f1c4fa)
diff --git a/common/Android.bp b/common/Android.bp
index 21d2e3e..34608ee 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -25,6 +25,7 @@
     srcs: [
         "OperationResolver.cpp",
         "operations/BidirectionalSequenceRNN.cpp",
+        "operations/Broadcast.cpp",
         "operations/ChannelShuffle.cpp",
         "operations/Comparisons.cpp",
         "operations/Conv2D.cpp",
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index fa4e00a..0114389 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -688,64 +688,6 @@
             LOG(ERROR) << "OEM operation not supported for CPU execution";
             success = false;
         } break;
-        case OperationType::ADD: {
-            if (!allParametersPresent(3, 1)) {
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            const RunTimeOperandInfo& in1 = mOperands[ins[0]];
-            const RunTimeOperandInfo& in2 = mOperands[ins[1]];
-            int32_t activation = getScalarData<int32_t>(mOperands[ins[2]]);
-
-            RunTimeOperandInfo& out = mOperands[outs[0]];
-            Shape outShape = out.shape();
-
-            if (!addMulPrepare(in1.shape(), in2.shape(), &outShape) ||
-                !setInfoAndAllocateIfNeeded(&out, outShape, &result)) {
-                break;
-            }
-            if (in1.type == OperandType::TENSOR_FLOAT32) {
-                success = addFloat32(reinterpret_cast<const float*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const float*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<float*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_FLOAT16) {
-                success = addFloat16(reinterpret_cast<const _Float16*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const _Float16*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<_Float16*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_QUANT8_ASYMM) {
-                success = addQuant8(reinterpret_cast<const uint8_t*>(in1.buffer), in1.shape(),
-                                    reinterpret_cast<const uint8_t*>(in2.buffer), in2.shape(),
-                                    activation, reinterpret_cast<uint8_t*>(out.buffer), outShape);
-            }
-        } break;
-        case OperationType::MUL: {
-            if (!allParametersPresent(3, 1)) {
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            const RunTimeOperandInfo& in1 = mOperands[ins[0]];
-            const RunTimeOperandInfo& in2 = mOperands[ins[1]];
-            int32_t activation = getScalarData<int32_t>(mOperands[ins[2]]);
-
-            RunTimeOperandInfo& out = mOperands[outs[0]];
-            Shape outShape = out.shape();
-
-            if (!addMulPrepare(in1.shape(), in2.shape(), &outShape) ||
-                !setInfoAndAllocateIfNeeded(&out, outShape, &result)) {
-                break;
-            }
-            if (in1.type == OperandType::TENSOR_FLOAT32) {
-                success = mulFloat32(reinterpret_cast<const float*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const float*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<float*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_FLOAT16) {
-                success = mulFloat16(reinterpret_cast<const _Float16*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const _Float16*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<_Float16*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_QUANT8_ASYMM) {
-                success = mulQuant8(reinterpret_cast<const uint8_t*>(in1.buffer), in1.shape(),
-                                    reinterpret_cast<const uint8_t*>(in2.buffer), in2.shape(),
-                                    activation, reinterpret_cast<uint8_t*>(out.buffer), outShape);
-            }
-        } break;
         case OperationType::FLOOR: {
             if (!allParametersPresent(1, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
@@ -1716,60 +1658,6 @@
                                         reinterpret_cast<const int32_t*>(strides.buffer), beginMask,
                                         endMask, shrinkAxisMask, output.buffer, outShape);
         } break;
-        case OperationType::DIV: {
-            if (!allParametersPresent(3, 1)) {
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            const RunTimeOperandInfo& in1 = mOperands[ins[0]];
-            const RunTimeOperandInfo& in2 = mOperands[ins[1]];
-            int32_t activation = getScalarData<int32_t>(mOperands[ins[2]]);
-
-            RunTimeOperandInfo& out = mOperands[outs[0]];
-            Shape outShape = out.shape();
-
-            if (!addMulPrepare(in1.shape(), in2.shape(), &outShape) ||
-                !setInfoAndAllocateIfNeeded(&out, outShape, &result)) {
-                break;
-            }
-            if (in1.type == OperandType::TENSOR_FLOAT32) {
-                success = divFloat32(reinterpret_cast<const float*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const float*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<float*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_FLOAT16) {
-                success = divFloat16(reinterpret_cast<const _Float16*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const _Float16*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<_Float16*>(out.buffer), outShape);
-            }
-        } break;
-        case OperationType::SUB: {
-            if (!allParametersPresent(3, 1)) {
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            const RunTimeOperandInfo& in1 = mOperands[ins[0]];
-            const RunTimeOperandInfo& in2 = mOperands[ins[1]];
-            int32_t activation = getScalarData<int32_t>(mOperands[ins[2]]);
-
-            RunTimeOperandInfo& out = mOperands[outs[0]];
-            Shape outShape = out.shape();
-
-            if (!addMulPrepare(in1.shape(), in2.shape(), &outShape) ||
-                !setInfoAndAllocateIfNeeded(&out, outShape, &result)) {
-                break;
-            }
-            if (in1.type == OperandType::TENSOR_FLOAT16) {
-                success = subFloat16(reinterpret_cast<const _Float16*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const _Float16*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<_Float16*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_FLOAT32) {
-                success = subFloat32(reinterpret_cast<const float*>(in1.buffer), in1.shape(),
-                                     reinterpret_cast<const float*>(in2.buffer), in2.shape(),
-                                     activation, reinterpret_cast<float*>(out.buffer), outShape);
-            } else if (in1.type == OperandType::TENSOR_QUANT8_ASYMM) {
-                success = subQuant8(reinterpret_cast<const uint8_t*>(in1.buffer), in1.shape(),
-                                    reinterpret_cast<const uint8_t*>(in2.buffer), in2.shape(),
-                                    activation, reinterpret_cast<uint8_t*>(out.buffer), outShape);
-            }
-        } break;
         case OperationType::MEAN: {
             if (!allParametersPresent(3, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
diff --git a/common/OperationResolver.cpp b/common/OperationResolver.cpp
index 8f1edfb..e77cf40 100644
--- a/common/OperationResolver.cpp
+++ b/common/OperationResolver.cpp
@@ -25,6 +25,7 @@
 
 // TODO(b/119608412): Find a way to not reference every operation here.
 const OperationRegistration* register_ABS();
+const OperationRegistration* register_ADD();
 const OperationRegistration* register_AVERAGE_POOL_2D();
 const OperationRegistration* register_AXIS_ALIGNED_BBOX_TRANSFORM();
 const OperationRegistration* register_BIDIRECTIONAL_SEQUENCE_RNN();
@@ -33,6 +34,7 @@
 const OperationRegistration* register_CONV_2D();
 const OperationRegistration* register_DEQUANTIZE();
 const OperationRegistration* register_DETECTION_POSTPROCESSING();
+const OperationRegistration* register_DIV();
 const OperationRegistration* register_EQUAL();
 const OperationRegistration* register_EXP();
 const OperationRegistration* register_FULLY_CONNECTED();
@@ -51,6 +53,7 @@
 const OperationRegistration* register_LOGICAL_OR();
 const OperationRegistration* register_LOG_SOFTMAX();
 const OperationRegistration* register_MAX_POOL_2D();
+const OperationRegistration* register_MUL();
 const OperationRegistration* register_NEG();
 const OperationRegistration* register_NOT_EQUAL();
 const OperationRegistration* register_PRELU();
@@ -68,11 +71,13 @@
 const OperationRegistration* register_SELECT();
 const OperationRegistration* register_SIN();
 const OperationRegistration* register_SQRT();
+const OperationRegistration* register_SUB();
 const OperationRegistration* register_UNIDIRECTIONAL_SEQUENCE_LSTM();
 const OperationRegistration* register_UNIDIRECTIONAL_SEQUENCE_RNN();
 
 BuiltinOperationResolver::BuiltinOperationResolver() {
     registerOperation(register_ABS());
+    registerOperation(register_ADD());
     registerOperation(register_AVERAGE_POOL_2D());
     registerOperation(register_AXIS_ALIGNED_BBOX_TRANSFORM());
     registerOperation(register_BIDIRECTIONAL_SEQUENCE_RNN());
@@ -81,6 +86,7 @@
     registerOperation(register_CONV_2D());
     registerOperation(register_DEQUANTIZE());
     registerOperation(register_DETECTION_POSTPROCESSING());
+    registerOperation(register_DIV());
     registerOperation(register_EQUAL());
     registerOperation(register_EXP());
     registerOperation(register_FULLY_CONNECTED());
@@ -99,6 +105,7 @@
     registerOperation(register_LOGICAL_OR());
     registerOperation(register_LOG_SOFTMAX());
     registerOperation(register_MAX_POOL_2D());
+    registerOperation(register_MUL());
     registerOperation(register_NEG());
     registerOperation(register_NOT_EQUAL());
     registerOperation(register_PRELU());
@@ -116,6 +123,7 @@
     registerOperation(register_SELECT());
     registerOperation(register_SIN());
     registerOperation(register_SQRT());
+    registerOperation(register_SUB());
     registerOperation(register_UNIDIRECTIONAL_SEQUENCE_LSTM());
     registerOperation(register_UNIDIRECTIONAL_SEQUENCE_RNN());
 }
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index af4e2e7..25a5258 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -289,6 +289,7 @@
 }
 
 bool calculateBroadcastedShape(const Shape& in1, const Shape& in2, Shape* out) {
+    NN_RET_CHECK(in1.type == in2.type);
     uint32_t numberOfDims1 = getNumberOfDimensions(in1);
     uint32_t numberOfDims2 = getNumberOfDimensions(in2);
     uint32_t maxDims = std::max(numberOfDims1, numberOfDims2);
@@ -308,7 +309,7 @@
                        << "\nSecond tensor: dimension " << numberOfDims2 - i << "of size " << dim2;
             return false;
         }
-        out->dimensions[maxDims - i] = std::max(dim1, dim2);
+        out->dimensions[maxDims - i] = (dim1 == 1) ? dim2 : dim1;
     }
     return true;
 }
@@ -318,15 +319,6 @@
     return static_cast<uint8_t>(doubleValue / newShape.scale + newShape.offset);
 }
 
-bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out) {
-    NN_OPS_CHECK(getNumberOfDimensions(in1) <= 4 && getNumberOfDimensions(in2) <= 4);
-    NN_OPS_CHECK(in1.type == in2.type);
-    if (SameShape(in1, in2)) {
-        return SetShape(in1, out);
-    }
-    return calculateBroadcastedShape(in1, in2, out);
-}
-
 bool floorPrepare(const Shape& input, Shape* output) {
     return SetShape(input, output);
 }
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 20429fd..ccb4f62 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -571,80 +571,6 @@
         case ANEURALNETWORKS_OEM_OPERATION: {
             return ANEURALNETWORKS_NO_ERROR;
         }
-        case ANEURALNETWORKS_ADD: {
-            if (inputCount != 3 || outputCount != 1) {
-                logInvalidInOutNumber(3, 1);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            auto inputType = operands[inputIndexes[0]].type;
-            std::vector<OperandType> inExpectedTypes;
-            std::vector<OperandType> outExpectedTypes;
-            if (inputType == OperandType::TENSOR_FLOAT32 ||
-                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {
-                        inputType,
-                        inputType,
-                        OperandType::INT32,
-                };
-                outExpectedTypes = {inputType};
-                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
-            } else if (inputType == OperandType::TENSOR_FLOAT16) {
-                inExpectedTypes = {
-                        OperandType::TENSOR_FLOAT16,
-                        OperandType::TENSOR_FLOAT16,
-                        OperandType::INT32,
-                };
-                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
-                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
-            } else {
-                LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << getOperationName(opType);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
-                                                 outExpectedTypes);
-        }
-        case ANEURALNETWORKS_MUL: {
-            if (inputCount != 3 || outputCount != 1) {
-                logInvalidInOutNumber(3, 1);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            auto inputType = operands[inputIndexes[0]].type;
-            std::vector<OperandType> inExpectedTypes;
-            std::vector<OperandType> outExpectedTypes;
-            if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_FLOAT32,
-                                   OperandType::INT32};
-                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
-            } else if (inputType == OperandType::TENSOR_FLOAT16) {
-                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
-                inExpectedTypes = {
-                        OperandType::TENSOR_FLOAT16,
-                        OperandType::TENSOR_FLOAT16,
-                        OperandType::INT32,
-                };
-                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
-            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::TENSOR_QUANT8_ASYMM,
-                                   OperandType::INT32};
-                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
-            } else {
-                LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << getOperationName(opType);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
-                                                 outExpectedTypes);
-        }
         case ANEURALNETWORKS_FLOOR: {
             if (inputCount != 1 || outputCount != 1) {
                 logInvalidInOutNumber(1, 1);
@@ -1749,68 +1675,6 @@
                                                  inExpectedTypes, outputCount, outputIndexes,
                                                  outExpectedTypes);
         }
-        case ANEURALNETWORKS_DIV: {
-            if (inputCount != 3 || outputCount != 1) {
-                logInvalidInOutNumber(3, 1);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            auto inputType = operands[inputIndexes[0]].type;
-            std::vector<OperandType> inExpectedTypes;
-            std::vector<OperandType> outExpectedTypes;
-            if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
-                                   OperandType::TENSOR_FLOAT32,
-                                   OperandType::INT32};
-                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
-            } else if (inputType == OperandType::TENSOR_FLOAT16) {
-                inExpectedTypes = {
-                        OperandType::TENSOR_FLOAT16,
-                        OperandType::TENSOR_FLOAT16,
-                        OperandType::INT32,
-                };
-                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
-                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
-            } else {
-                LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << getOperationName(opType);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
-                                                 outExpectedTypes);
-        }
-        case ANEURALNETWORKS_SUB: {
-            if (inputCount != 3 || outputCount != 1) {
-                logInvalidInOutNumber(3, 1);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            auto inputType = operands[inputIndexes[0]].type;
-            std::vector<OperandType> inExpectedTypes;
-            std::vector<OperandType> outExpectedTypes;
-            if (inputType == OperandType::TENSOR_FLOAT32) {
-                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
-                                   OperandType::INT32};
-                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
-                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
-            } else if (inputType == OperandType::TENSOR_FLOAT16 ||
-                       inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-                inExpectedTypes = {inputType, inputType, OperandType::INT32};
-                outExpectedTypes = {inputType};
-                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
-            } else {
-                LOG(ERROR) << "Unsupported input tensor type for operation "
-                           << getOperationName(opType);
-                return ANEURALNETWORKS_BAD_DATA;
-            }
-            return validateOperationOperandTypes(operands,
-                                                 inputCount, inputIndexes,
-                                                 inExpectedTypes,
-                                                 outputCount, outputIndexes,
-                                                 outExpectedTypes);
-        }
         case ANEURALNETWORKS_MEAN: {
             if (inputCount != 3 || outputCount != 1) {
                 logInvalidInOutNumber(3, 1);
diff --git a/common/include/Operations.h b/common/include/Operations.h
index 3eafd7e..d816761 100644
--- a/common/include/Operations.h
+++ b/common/include/Operations.h
@@ -44,20 +44,6 @@
 
 struct Shape;
 
-bool addFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut);
-bool addFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut);
-bool addQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
-               int32_t activation, uint8_t* out, const Shape& shapeOut);
-
-bool mulFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut);
-bool mulFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut);
-bool mulQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
-               int32_t activation, uint8_t* out, const Shape& shapeOut);
-
 bool floorFloat16(const _Float16* inputData, _Float16* outputData, const Shape& shape);
 bool floorFloat32(const float* inputData, float* outputData, const Shape& shape);
 
@@ -172,20 +158,6 @@
                          const int32_t* padding, const Shape& paddingShape, T* outputData,
                          const Shape& outputShape);
 
-bool subFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut);
-
-bool subFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut);
-
-bool subQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
-               int32_t activation, uint8_t* out, const Shape& shapeOut);
-
-bool divFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut);
-bool divFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut);
-
 template <typename T>
 bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
                       const Shape& permShape, T* outputData, const Shape& outputShape);
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h
index 0f6666e..604c355 100644
--- a/common/include/OperationsUtils.h
+++ b/common/include/OperationsUtils.h
@@ -293,8 +293,6 @@
 uint8_t requantize(uint8_t value, const Shape& oldShape, const Shape& newShape);
 
 // Preparation functions for the corresponding ops
-bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out1);
-
 bool floorPrepare(const Shape& input, Shape* output);
 
 bool quantizePrepare(const Shape& input, Shape* output);
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp
new file mode 100644
index 0000000..76b1c44
--- /dev/null
+++ b/common/operations/Broadcast.cpp
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Implementation of the broadcast binary operations: ADD, MUL, SUB, and DIV.
+
+#define LOG_TAG "Operations"
+
+#include "CpuOperationUtils.h"
+#include "OperationResolver.h"
+
+#include "tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h"
+#include "tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h"
+
+#include "Tracing.h"
+
+namespace android {
+namespace nn {
+namespace broadcast {
+
+constexpr uint32_t kNumInputs = 3;
+constexpr uint32_t kInputTensor1 = 0;
+constexpr uint32_t kInputTensor2 = 1;
+constexpr uint32_t kActivationScalar = 2;
+
+constexpr uint32_t kNumOutputs = 1;
+constexpr uint32_t kOutputTensor = 0;
+
+namespace {
+
+#define ANDROID_NN_MACRO_DISPATCH(macro)                                \
+    switch (activation) {                                               \
+        case (int32_t)FusedActivationFunc::NONE:                        \
+            macro(kNone);                                               \
+            break;                                                      \
+        case (int32_t)FusedActivationFunc::RELU:                        \
+            macro(kRelu);                                               \
+            break;                                                      \
+        case (int32_t)FusedActivationFunc::RELU1:                       \
+            macro(kRelu1);                                              \
+            break;                                                      \
+        case (int32_t)FusedActivationFunc::RELU6:                       \
+            macro(kRelu6);                                              \
+            break;                                                      \
+        default:                                                        \
+            LOG(ERROR) << "Unsupported fused activation function type"; \
+            return false;                                               \
+    }
+
+using binaryFunctionFloat32 = std::function<bool(
+        const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
+        int32_t activation, float* out, const Shape& shapeOut)>;
+
+bool binaryOperationFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2,
+                            const Shape& shape2, int32_t activation, _Float16* out,
+                            const Shape& shapeOut, binaryFunctionFloat32 operationFloat32) {
+    std::vector<float> in1_float32(getNumberOfElements(shape1));
+    convertFloat16ToFloat32(in1, &in1_float32);
+    std::vector<float> in2_float32(getNumberOfElements(shape2));
+    convertFloat16ToFloat32(in2, &in2_float32);
+    std::vector<float> out_float32(getNumberOfElements(shapeOut));
+
+    operationFloat32(in1_float32.data(), shape1, in2_float32.data(), shape2, activation,
+                     out_float32.data(), shapeOut);
+    convertFloat32ToFloat16(out_float32, out);
+
+    return true;
+}
+
+bool addFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
+                int32_t activation, float* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("addFloat32");
+    bool needBroadcast = !SameShape(shape1, shape2);
+    if (needBroadcast) {
+        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastAdd");
+#define ANDROID_NN_BROADCAST_ADD(activation)                                              \
+    tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>( \
+            in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2), out,        \
+            convertShapeToDims(shapeOut))
+
+        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_ADD)
+#undef ANDROID_NN_BROADCAST_ADD
+    } else {
+        NNTRACE_COMP_SWITCH("optimized_ops::Add");
+#define ANDROID_NN_ADD(activation)                                                 \
+    tflite::optimized_ops::Add<tflite::FusedActivationFunctionType::activation>(   \
+            in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2), out, \
+            convertShapeToDims(shapeOut))
+
+        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_ADD)
+#undef ANDROID_NN_ADD
+    }
+
+    return true;
+}
+
+bool addFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
+                int32_t activation, _Float16* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("addFloat16");
+    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &addFloat32);
+}
+
+bool addQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
+               int32_t activation, uint8_t* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("addQuant8");
+    bool needBroadcast = !SameShape(shape1, shape2);
+
+    const int32_t input1_offset = -shape1.offset;
+    const int32_t input2_offset = -shape2.offset;
+    const int32_t output_offset = shapeOut.offset;
+    const int left_shift = 20;
+    const double twice_max_input_scale = 2 * std::max(shape1.scale, shape2.scale);
+    const double real_input1_multiplier = shape1.scale / twice_max_input_scale;
+    const double real_input2_multiplier = shape2.scale / twice_max_input_scale;
+    const double real_output_multiplier =
+            twice_max_input_scale / ((1 << left_shift) * shapeOut.scale);
+
+    int32_t input1_multiplier;
+    int32_t input1_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_input1_multiplier, &input1_multiplier,
+                                          &input1_shift)) {
+        return false;
+    }
+    int32_t input2_multiplier;
+    int32_t input2_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_input2_multiplier, &input2_multiplier,
+                                          &input2_shift)) {
+        return false;
+    }
+    int32_t output_multiplier;
+    int32_t output_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_output_multiplier, &output_multiplier,
+                                          &output_shift)) {
+        return false;
+    }
+    int32_t output_activation_min;
+    int32_t output_activation_max;
+    CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,
+                                  &output_activation_max);
+
+    if (needBroadcast) {
+        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastAdd");
+#define ANDROID_NN_BROADCAST_ADD(activation)                                                     \
+    tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>(        \
+            left_shift, in1, convertShapeToDims(shape1), input1_offset, input1_multiplier,       \
+            input1_shift, in2, convertShapeToDims(shape2), input2_offset, input2_multiplier,     \
+            input2_shift, output_offset, output_multiplier, output_shift, output_activation_min, \
+            output_activation_max, out, convertShapeToDims(shapeOut))
+
+        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_ADD)
+#undef ANDROID_NN_BROADCAST_ADD
+    } else {
+        NNTRACE_COMP_SWITCH("optimized_ops::Add");
+#define ANDROID_NN_NORMAL_ADD(activation)                                                        \
+    tflite::optimized_ops::Add<tflite::FusedActivationFunctionType::activation>(                 \
+            left_shift, in1, convertShapeToDims(shape1), input1_offset, input1_multiplier,       \
+            input1_shift, in2, convertShapeToDims(shape2), input2_offset, input2_multiplier,     \
+            input2_shift, output_offset, output_multiplier, output_shift, output_activation_min, \
+            output_activation_max, out, convertShapeToDims(shapeOut))
+
+        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_NORMAL_ADD)
+#undef ANDROID_NN_NORMAL_ADD
+    }
+
+    return true;
+}
+
+bool mulFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
+                int32_t activation, float* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("mulFloat32");
+    bool needBroadcast = !SameShape(shape1, shape2);
+
+    if (needBroadcast) {
+        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastMul");
+#define ANDROID_NN_BROADCAST_MUL(activation)                                              \
+    tflite::optimized_ops::BroadcastMul<tflite::FusedActivationFunctionType::activation>( \
+            in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2), out,        \
+            convertShapeToDims(shapeOut))
+
+        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_MUL)
+#undef ANDROID_NN_BROADCAST_MUL
+    } else {
+        float output_activation_min, output_activation_max;
+        CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
+
+        NNTRACE_COMP_SWITCH("optimized_ops::Mul");
+        tflite::optimized_ops::Mul(in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
+                                   output_activation_min, output_activation_max, out,
+                                   convertShapeToDims(shapeOut));
+    }
+
+    return true;
+}
+
+bool mulFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
+                int32_t activation, _Float16* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("mulFloat16");
+    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &mulFloat32);
+}
+
+bool mulQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
+               int32_t activation, uint8_t* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("mulQuant8");
+    const int32_t input1_offset = -shape1.offset;
+    const int32_t input2_offset = -shape2.offset;
+    const int32_t output_offset = shapeOut.offset;
+    const double input_product_scale = shape1.scale * shape2.scale;
+    const double real_multiplier = input_product_scale / shapeOut.scale;
+    int32 output_multiplier;
+    int output_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier, &output_shift)) {
+        return false;
+    }
+    int32_t output_activation_min;
+    int32_t output_activation_max;
+    CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,
+                                  &output_activation_max);
+
+    // Use the BROADCAST version unconditionally; it also handles the same-shape (non-broadcast) case.
+    NNTRACE_COMP_SWITCH("optimized_ops::BroadcastMul");
+    tflite::optimized_ops::BroadcastMul(in1, convertShapeToDims(shape1), input1_offset, in2,
+                                        convertShapeToDims(shape2), input2_offset, output_offset,
+                                        output_multiplier, output_shift, output_activation_min,
+                                        output_activation_max, out, convertShapeToDims(shapeOut));
+
+    return true;
+}
+
+bool subFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
+                int32_t activation, float* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("subFloat32");
+    NNTRACE_COMP_SWITCH("optimized_ops::Sub");
+    tflite::optimized_ops::Sub(in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
+                               out, convertShapeToDims(shapeOut));
+
+    // TFLite's Sub does not apply the fused activation, so clamp the output manually here.
+    float output_activation_min, output_activation_max;
+    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
+    uint32_t numOutputElements = getNumberOfElements(shapeOut);
+    for (uint32_t i = 0; i < numOutputElements; i++) {
+        out[i] = std::min(std::max(out[i], output_activation_min), output_activation_max);
+    }
+    return true;
+}
+
+bool subFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
+                int32_t activation, _Float16* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("subFloat16");
+    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &subFloat32);
+}
+
+bool subQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
+               int32_t activation, uint8_t* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("subQuant8");
+
+    const int32_t input1_offset = -shape1.offset;
+    const int32_t input2_offset = -shape2.offset;
+    const int32_t output_offset = shapeOut.offset;
+    const int left_shift = 20;
+    const double twice_max_input_scale = 2 * std::max(shape1.scale, shape2.scale);
+    const double real_input1_multiplier = shape1.scale / twice_max_input_scale;
+    const double real_input2_multiplier = shape2.scale / twice_max_input_scale;
+    const double real_output_multiplier =
+            twice_max_input_scale / ((1 << left_shift) * shapeOut.scale);
+
+    int32_t input1_multiplier;
+    int32_t input1_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_input1_multiplier, &input1_multiplier,
+                                          &input1_shift)) {
+        return false;
+    }
+    int32_t input2_multiplier;
+    int32_t input2_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_input2_multiplier, &input2_multiplier,
+                                          &input2_shift)) {
+        return false;
+    }
+    input2_multiplier *= -1;
+    int32_t output_multiplier;
+    int32_t output_shift;
+    if (!QuantizeMultiplierSmallerThanOne(real_output_multiplier, &output_multiplier,
+                                          &output_shift)) {
+        return false;
+    }
+    int32_t output_activation_min;
+    int32_t output_activation_max;
+    CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,
+                                  &output_activation_max);
+
+    // We are using tflite::optimized_ops::BroadcastAdd unconditionally here
+    // because tflite::optimized_ops::Add fails to pass some of the
+    // sub_quantized_different_scales tests.
+    NNTRACE_COMP_SWITCH("optimized_ops::BroadcastAdd");
+#define ANDROID_NN_BROADCAST_ADD(activation)                                                     \
+    tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>(        \
+            left_shift, in1, convertShapeToDims(shape1), input1_offset, input1_multiplier,       \
+            input1_shift, in2, convertShapeToDims(shape2), input2_offset, input2_multiplier,     \
+            input2_shift, output_offset, output_multiplier, output_shift, output_activation_min, \
+            output_activation_max, out, convertShapeToDims(shapeOut))
+
+    ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_ADD)
+#undef ANDROID_NN_BROADCAST_ADD
+
+    return true;
+}
+
+bool divFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
+                int32_t activation, float* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("divFloat32");
+    float output_activation_min, output_activation_max;
+    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
+
+    bool needBroadcast = !SameShape(shape1, shape2);
+    if (needBroadcast) {
+        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastDiv");
+        tflite::optimized_ops::BroadcastDiv(
+                in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
+                output_activation_min, output_activation_max, out, convertShapeToDims(shapeOut));
+    } else {
+        NNTRACE_COMP_SWITCH("optimized_ops::Div");
+        tflite::optimized_ops::Div(in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
+                                   output_activation_min, output_activation_max, out,
+                                   convertShapeToDims(shapeOut));
+    }
+    return true;
+}
+
+bool divFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
+                int32_t activation, _Float16* out, const Shape& shapeOut) {
+    NNTRACE_TRANS("divFloat16");
+    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &divFloat32);
+}
+
+}  // namespace
+
+bool validate(OperationType opType, const IOperationValidationContext* context) {
+    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
+    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
+    auto inputType = context->getInputType(kInputTensor1);
+    if (inputType == OperandType::TENSOR_FLOAT32) {
+        NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0));
+    } else if (inputType == OperandType::TENSOR_FLOAT16) {
+        NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
+    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+        if (opType == OperationType::SUB) {
+            NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
+        } else if (opType == OperationType::DIV) {
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV";
+        } else {
+            NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0));
+        }
+    } else {
+        NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+    }
+    return validateInputTypes(context, {inputType, inputType, OperandType::INT32}) &&
+           validateOutputTypes(context, {inputType});
+}
+
+bool prepare(IOperationExecutionContext* context) {
+    Shape input1 = context->getInputShape(kInputTensor1);
+    Shape input2 = context->getInputShape(kInputTensor2);
+    Shape output = context->getOutputShape(kOutputTensor);
+    NN_RET_CHECK_LE(getNumberOfDimensions(input1), 4);
+    NN_RET_CHECK_LE(getNumberOfDimensions(input2), 4);
+    NN_RET_CHECK(calculateBroadcastedShape(input1, input2, &output));
+    return context->setOutputShape(kOutputTensor, output);
+}
+
+bool executeAdd(IOperationExecutionContext* context) {
+    // Bypass execution in the case of zero-sized input.
+    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
+    switch (context->getInputType(kInputTensor1)) {
+        case OperandType::TENSOR_FLOAT16:
+            return addFloat16(context->getInputBuffer<_Float16>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<_Float16>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<_Float16>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_FLOAT32:
+            return addFloat32(context->getInputBuffer<float>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<float>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<float>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return addQuant8(context->getInputBuffer<uint8_t>(kInputTensor1),
+                             context->getInputShape(kInputTensor1),
+                             context->getInputBuffer<uint8_t>(kInputTensor2),
+                             context->getInputShape(kInputTensor2),
+                             context->getInputValue<int32_t>(kActivationScalar),
+                             context->getOutputBuffer<uint8_t>(kOutputTensor),
+                             context->getOutputShape(kOutputTensor));
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation ADD";
+    }
+}
+
+bool executeMul(IOperationExecutionContext* context) {
+    // Bypass execution in the case of zero-sized input.
+    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
+    switch (context->getInputType(kInputTensor1)) {
+        case OperandType::TENSOR_FLOAT16:
+            return mulFloat16(context->getInputBuffer<_Float16>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<_Float16>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<_Float16>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_FLOAT32:
+            return mulFloat32(context->getInputBuffer<float>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<float>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<float>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return mulQuant8(context->getInputBuffer<uint8_t>(kInputTensor1),
+                             context->getInputShape(kInputTensor1),
+                             context->getInputBuffer<uint8_t>(kInputTensor2),
+                             context->getInputShape(kInputTensor2),
+                             context->getInputValue<int32_t>(kActivationScalar),
+                             context->getOutputBuffer<uint8_t>(kOutputTensor),
+                             context->getOutputShape(kOutputTensor));
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation MUL";
+    }
+}
+
+bool executeSub(IOperationExecutionContext* context) {
+    // Bypass execution in the case of zero-sized input.
+    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
+    switch (context->getInputType(kInputTensor1)) {
+        case OperandType::TENSOR_FLOAT16:
+            return subFloat16(context->getInputBuffer<_Float16>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<_Float16>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<_Float16>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_FLOAT32:
+            return subFloat32(context->getInputBuffer<float>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<float>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<float>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            return subQuant8(context->getInputBuffer<uint8_t>(kInputTensor1),
+                             context->getInputShape(kInputTensor1),
+                             context->getInputBuffer<uint8_t>(kInputTensor2),
+                             context->getInputShape(kInputTensor2),
+                             context->getInputValue<int32_t>(kActivationScalar),
+                             context->getOutputBuffer<uint8_t>(kOutputTensor),
+                             context->getOutputShape(kOutputTensor));
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation SUB";
+    }
+}
+
+bool executeDiv(IOperationExecutionContext* context) {
+    // Bypass execution in the case of zero-sized input.
+    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
+    switch (context->getInputType(kInputTensor1)) {
+        case OperandType::TENSOR_FLOAT16:
+            return divFloat16(context->getInputBuffer<_Float16>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<_Float16>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<_Float16>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        case OperandType::TENSOR_FLOAT32:
+            return divFloat32(context->getInputBuffer<float>(kInputTensor1),
+                              context->getInputShape(kInputTensor1),
+                              context->getInputBuffer<float>(kInputTensor2),
+                              context->getInputShape(kInputTensor2),
+                              context->getInputValue<int32_t>(kActivationScalar),
+                              context->getOutputBuffer<float>(kOutputTensor),
+                              context->getOutputShape(kOutputTensor));
+        default:
+            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV";
+    }
+}
+
+}  // namespace broadcast
+
+using std::placeholders::_1;
+NN_REGISTER_OPERATION(ADD, "ADD", std::bind(broadcast::validate, OperationType::ADD, _1),
+                      broadcast::prepare, broadcast::executeAdd, .allowZeroSizedInput = true);
+NN_REGISTER_OPERATION(MUL, "MUL", std::bind(broadcast::validate, OperationType::MUL, _1),
+                      broadcast::prepare, broadcast::executeMul, .allowZeroSizedInput = true);
+NN_REGISTER_OPERATION(SUB, "SUB", std::bind(broadcast::validate, OperationType::SUB, _1),
+                      broadcast::prepare, broadcast::executeSub, .allowZeroSizedInput = true);
+NN_REGISTER_OPERATION(DIV, "DIV", std::bind(broadcast::validate, OperationType::DIV, _1),
+                      broadcast::prepare, broadcast::executeDiv, .allowZeroSizedInput = true);
+
+}  // namespace nn
+}  // namespace android
diff --git a/common/operations/SimpleMath.cpp b/common/operations/SimpleMath.cpp
index 50c0b5e..ad9711f 100644
--- a/common/operations/SimpleMath.cpp
+++ b/common/operations/SimpleMath.cpp
@@ -29,204 +29,6 @@
 namespace android {
 namespace nn {
 
-#define ANDROID_NN_MACRO_DISPATCH(macro)                                \
-    switch (activation) {                                               \
-        case (int32_t)FusedActivationFunc::NONE:                        \
-            macro(kNone);                                               \
-            break;                                                      \
-        case (int32_t)FusedActivationFunc::RELU:                        \
-            macro(kRelu);                                               \
-            break;                                                      \
-        case (int32_t)FusedActivationFunc::RELU1:                       \
-            macro(kRelu1);                                              \
-            break;                                                      \
-        case (int32_t)FusedActivationFunc::RELU6:                       \
-            macro(kRelu6);                                              \
-            break;                                                      \
-        default:                                                        \
-            LOG(ERROR) << "Unsupported fused activation function type"; \
-            return false;                                               \
-    }
-
-using binaryFunctionFloat32 = std::function<bool(
-        const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-        int32_t activation, float* out, const Shape& shapeOut)>;
-
-bool binaryOperationFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2,
-                            const Shape& shape2, int32_t activation, _Float16* out,
-                            const Shape& shapeOut, binaryFunctionFloat32 operationFloat32) {
-    std::vector<float> in1_float32(getNumberOfElements(shape1));
-    convertFloat16ToFloat32(in1, &in1_float32);
-    std::vector<float> in2_float32(getNumberOfElements(shape2));
-    convertFloat16ToFloat32(in2, &in2_float32);
-    std::vector<float> out_float32(getNumberOfElements(shapeOut));
-
-    operationFloat32(in1_float32.data(), shape1, in2_float32.data(), shape2, activation,
-                     out_float32.data(), shapeOut);
-    convertFloat32ToFloat16(out_float32, out);
-
-    return true;
-}
-
-bool addFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("addFloat16");
-    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &addFloat32);
-}
-
-bool addFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("addFloat32");
-    bool needBroadcast = !SameShape(shape1, shape2);
-    if (needBroadcast) {
-        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastAdd");
-#define ANDROID_NN_BROADCAST_ADD(activation)                                              \
-    tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>( \
-            in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2), out,        \
-            convertShapeToDims(shapeOut))
-
-        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_ADD)
-#undef ANDROID_NN_BROADCAST_ADD
-    } else {
-        NNTRACE_COMP_SWITCH("optimized_ops::Add");
-#define ANDROID_NN_ADD(activation)                                                 \
-    tflite::optimized_ops::Add<tflite::FusedActivationFunctionType::activation>(   \
-            in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2), out, \
-            convertShapeToDims(shapeOut))
-
-        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_ADD)
-#undef ANDROID_NN_ADD
-    }
-
-    return true;
-}
-
-bool addQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
-               int32_t activation, uint8_t* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("addQuant8");
-    bool needBroadcast = !SameShape(shape1, shape2);
-
-    const int32_t input1_offset = -shape1.offset;
-    const int32_t input2_offset = -shape2.offset;
-    const int32_t output_offset = shapeOut.offset;
-    const int left_shift = 20;
-    const double twice_max_input_scale = 2 * std::max(shape1.scale, shape2.scale);
-    const double real_input1_multiplier = shape1.scale / twice_max_input_scale;
-    const double real_input2_multiplier = shape2.scale / twice_max_input_scale;
-    const double real_output_multiplier =
-            twice_max_input_scale / ((1 << left_shift) * shapeOut.scale);
-
-    int32_t input1_multiplier;
-    int32_t input1_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_input1_multiplier, &input1_multiplier,
-                                          &input1_shift)) {
-        return false;
-    }
-    int32_t input2_multiplier;
-    int32_t input2_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_input2_multiplier, &input2_multiplier,
-                                          &input2_shift)) {
-        return false;
-    }
-    int32_t output_multiplier;
-    int32_t output_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_output_multiplier, &output_multiplier,
-                                          &output_shift)) {
-        return false;
-    }
-    int32_t output_activation_min;
-    int32_t output_activation_max;
-    CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,
-                                  &output_activation_max);
-
-    if (needBroadcast) {
-        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastAdd");
-#define ANDROID_NN_BROADCAST_ADD(activation)                                                     \
-    tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>(        \
-            left_shift, in1, convertShapeToDims(shape1), input1_offset, input1_multiplier,       \
-            input1_shift, in2, convertShapeToDims(shape2), input2_offset, input2_multiplier,     \
-            input2_shift, output_offset, output_multiplier, output_shift, output_activation_min, \
-            output_activation_max, out, convertShapeToDims(shapeOut))
-
-        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_ADD)
-#undef ANDROID_NN_BROADCAST_ADD
-    } else {
-        NNTRACE_COMP_SWITCH("optimized_ops::Add");
-#define ANDROID_NN_NORMAL_ADD(activation)                                                        \
-    tflite::optimized_ops::Add<tflite::FusedActivationFunctionType::activation>(                 \
-            left_shift, in1, convertShapeToDims(shape1), input1_offset, input1_multiplier,       \
-            input1_shift, in2, convertShapeToDims(shape2), input2_offset, input2_multiplier,     \
-            input2_shift, output_offset, output_multiplier, output_shift, output_activation_min, \
-            output_activation_max, out, convertShapeToDims(shapeOut))
-
-        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_NORMAL_ADD)
-#undef ANDROID_NN_NORMAL_ADD
-    }
-
-    return true;
-}
-
-bool mulFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("mulFloat16");
-    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &mulFloat32);
-}
-
-bool mulFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("mulFloat32");
-    bool needBroadcast = !SameShape(shape1, shape2);
-
-    if (needBroadcast) {
-        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastMul");
-#define ANDROID_NN_BROADCAST_MUL(activation)                                              \
-    tflite::optimized_ops::BroadcastMul<tflite::FusedActivationFunctionType::activation>( \
-            in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2), out,        \
-            convertShapeToDims(shapeOut))
-
-        ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_MUL)
-#undef ANDROID_NN_BROADCAST_MUL
-    } else {
-        float output_activation_min, output_activation_max;
-        CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
-
-        NNTRACE_COMP_SWITCH("optimized_ops::Mul");
-        tflite::optimized_ops::Mul(in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
-                                   output_activation_min, output_activation_max, out,
-                                   convertShapeToDims(shapeOut));
-    }
-
-    return true;
-}
-
-bool mulQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
-               int32_t activation, uint8_t* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("mulQuant8");
-    const int32_t input1_offset = -shape1.offset;
-    const int32_t input2_offset = -shape2.offset;
-    const int32_t output_offset = shapeOut.offset;
-    const double input_product_scale = shape1.scale * shape2.scale;
-    const double real_multiplier = input_product_scale / shapeOut.scale;
-    int32 output_multiplier;
-    int output_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier, &output_shift)) {
-        return false;
-    }
-    int32_t output_activation_min;
-    int32_t output_activation_max;
-    CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,
-                                  &output_activation_max);
-
-    // Use BROADCAST version to handle the normal case.
-    NNTRACE_COMP_SWITCH("optimized_ops::BroadcastMul");
-    tflite::optimized_ops::BroadcastMul(in1, convertShapeToDims(shape1), input1_offset, in2,
-                                        convertShapeToDims(shape2), input2_offset, output_offset,
-                                        output_multiplier, output_shift, output_activation_min,
-                                        output_activation_max, out, convertShapeToDims(shapeOut));
-
-    return true;
-}
-
 bool floorFloat16(const _Float16* inputData, _Float16* outputData, const Shape& shape) {
     NNTRACE_TRANS("floorFloat16");
     std::vector<float> inputDataFloat32(getNumberOfElements(shape));
@@ -270,111 +72,6 @@
     return true;
 }
 
-bool subFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("subFloat16");
-    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &subFloat32);
-}
-
-bool subFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("subFloat32");
-    NNTRACE_COMP_SWITCH("optimized_ops::Sub");
-    tflite::optimized_ops::Sub(in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
-                               out, convertShapeToDims(shapeOut));
-
-    // TFLite does not apply activation to broadcast sub.
-    float output_activation_min, output_activation_max;
-    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
-    uint32_t numOutputElements = getNumberOfElements(shapeOut);
-    for (uint32_t i = 0; i < numOutputElements; i++) {
-        out[i] = std::min(std::max(out[i], output_activation_min), output_activation_max);
-    }
-    return true;
-}
-
-bool subQuant8(const uint8_t* in1, const Shape& shape1, const uint8_t* in2, const Shape& shape2,
-               int32_t activation, uint8_t* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("subQuant8");
-
-    const int32_t input1_offset = -shape1.offset;
-    const int32_t input2_offset = -shape2.offset;
-    const int32_t output_offset = shapeOut.offset;
-    const int left_shift = 20;
-    const double twice_max_input_scale = 2 * std::max(shape1.scale, shape2.scale);
-    const double real_input1_multiplier = shape1.scale / twice_max_input_scale;
-    const double real_input2_multiplier = shape2.scale / twice_max_input_scale;
-    const double real_output_multiplier =
-            twice_max_input_scale / ((1 << left_shift) * shapeOut.scale);
-
-    int32_t input1_multiplier;
-    int32_t input1_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_input1_multiplier, &input1_multiplier,
-                                          &input1_shift)) {
-        return false;
-    }
-    int32_t input2_multiplier;
-    int32_t input2_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_input2_multiplier, &input2_multiplier,
-                                          &input2_shift)) {
-        return false;
-    }
-    input2_multiplier *= -1;
-    int32_t output_multiplier;
-    int32_t output_shift;
-    if (!QuantizeMultiplierSmallerThanOne(real_output_multiplier, &output_multiplier,
-                                          &output_shift)) {
-        return false;
-    }
-    int32_t output_activation_min;
-    int32_t output_activation_max;
-    CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,
-                                  &output_activation_max);
-
-    // We are using tflite::optimized_ops::BroadcastAdd unconditionally here
-    // because tflite::optimized_ops::Add fails to pass some of the
-    // sub_quantized_different_scales tests.
-    NNTRACE_COMP_SWITCH("optimized_ops::BroadcastAdd");
-#define ANDROID_NN_BROADCAST_ADD(activation)                                                     \
-    tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>(        \
-            left_shift, in1, convertShapeToDims(shape1), input1_offset, input1_multiplier,       \
-            input1_shift, in2, convertShapeToDims(shape2), input2_offset, input2_multiplier,     \
-            input2_shift, output_offset, output_multiplier, output_shift, output_activation_min, \
-            output_activation_max, out, convertShapeToDims(shapeOut))
-
-    ANDROID_NN_MACRO_DISPATCH(ANDROID_NN_BROADCAST_ADD)
-#undef ANDROID_NN_BROADCAST_ADD
-
-    return true;
-}
-
-bool divFloat16(const _Float16* in1, const Shape& shape1, const _Float16* in2, const Shape& shape2,
-                int32_t activation, _Float16* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("divFloat16");
-    return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &divFloat32);
-}
-
-bool divFloat32(const float* in1, const Shape& shape1, const float* in2, const Shape& shape2,
-                int32_t activation, float* out, const Shape& shapeOut) {
-    NNTRACE_TRANS("divFloat32");
-    float output_activation_min, output_activation_max;
-    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);
-
-    bool needBroadcast = !SameShape(shape1, shape2);
-    if (needBroadcast) {
-        NNTRACE_COMP_SWITCH("optimized_ops::BroadcastDiv");
-        tflite::optimized_ops::BroadcastDiv(
-                in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
-                output_activation_min, output_activation_max, out, convertShapeToDims(shapeOut));
-    } else {
-        NNTRACE_COMP_SWITCH("optimized_ops::Div");
-        tflite::optimized_ops::Div(in1, convertShapeToDims(shape1), in2, convertShapeToDims(shape2),
-                                   output_activation_min, output_activation_max, out,
-                                   convertShapeToDims(shapeOut));
-    }
-    return true;
-}
-
 bool meanFloat16(_Float16* inputData, const Shape& inputShape, const int32_t* axis,
                  const Shape& axisShape, bool keepDims, _Float16* outputData,
                  const Shape& outputShape) {
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 1eeb4d1..e96c9ee 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -213,6 +213,10 @@
      *     input2.dimension = {5, 4, 3, 1}
      *     output.dimension = {5, 4, 3, 2}
      *
+     * Since API level 29, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of an output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
      * Supported tensor {@link OperandCode}:
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1438,6 +1442,10 @@
      * of the input operands. It starts with the trailing dimensions, and works
      * its way forward.
      *
+     * Since API level 29, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of an output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
      * Supported tensor {@link OperandCode}:
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1923,6 +1931,10 @@
      *     input2.dimension = {5, 4, 3, 1}
      *     output.dimension = {5, 4, 3, 2}
      *
+     * Since API level 29, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of an output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
      * Supported tensor {@link OperandCode}:
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -2157,6 +2169,10 @@
      *     input2.dimension = {5, 4, 3, 1}
      *     output.dimension = {5, 4, 3, 2}
      *
+     * Since API level 29, generic zero-sized input tensors are supported. A
+     * zero dimension is only compatible with 0 or 1. The size of an output
+     * dimension is zero if either of the corresponding input dimensions is zero.
+     *
      * Supported tensor {@link OperandCode}:
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
diff --git a/runtime/test/for-cts/TestGeneratedOneFile.cpp b/runtime/test/for-cts/TestGeneratedOneFile.cpp
index bac9bb5..796bcff 100644
--- a/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -326,8 +326,7 @@
 #include "../generated/tests/transpose_quant8_1.mod.py.cpp"
 #include "../generated/tests/transpose_relaxed.mod.py.cpp"
 #include "../generated/tests/abs.mod.py.cpp"
-#include "../generated/tests/add_broadcast_float16.mod.py.cpp"
-#include "../generated/tests/add_float16.mod.py.cpp"
+#include "../generated/tests/add_v1_2.mod.py.cpp"
 #include "../generated/tests/argmax_1.mod.py.cpp"
 #include "../generated/tests/argmax_2.mod.py.cpp"
 #include "../generated/tests/argmax_3.mod.py.cpp"
@@ -357,8 +356,7 @@
 #include "../generated/tests/dequantize_1_2.mod.py.cpp"
 #include "../generated/tests/dequantize_float16.mod.py.cpp"
 #include "../generated/tests/detection_postprocess.mod.py.cpp"
-#include "../generated/tests/div_broadcast_float16.mod.py.cpp"
-#include "../generated/tests/div_float16.mod.py.cpp"
+#include "../generated/tests/div_v1_2.mod.py.cpp"
 #include "../generated/tests/equal.mod.py.cpp"
 #include "../generated/tests/exp.mod.py.cpp"
 #include "../generated/tests/expand_dims.mod.py.cpp"
@@ -403,8 +401,7 @@
 #include "../generated/tests/maximum.mod.py.cpp"
 #include "../generated/tests/mean_float16.mod.py.cpp"
 #include "../generated/tests/minimum.mod.py.cpp"
-#include "../generated/tests/mul_broadcast_float16.mod.py.cpp"
-#include "../generated/tests/mul_float16.mod.py.cpp"
+#include "../generated/tests/mul_v1_2.mod.py.cpp"
 #include "../generated/tests/neg.mod.py.cpp"
 #include "../generated/tests/not_equal.mod.py.cpp"
 #include "../generated/tests/pad_float16.mod.py.cpp"
diff --git a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index 66ed6c5..9c613f7 100644
--- a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -105,80 +105,200 @@
 
 
 #endif
-// Generated from: add_broadcast_float16.mod.py.
-namespace add_broadcast_float16 {
-// Generated add_broadcast_float16 test
-#include "examples/add_broadcast_float16.example.cpp"
+// Generated from: add_v1_2.mod.py.
+namespace add_v1_2 {
+// Generated add_v1_2 test
+#include "examples/add_v1_2.example.cpp"
 // Generated model constructor
-#include "vts_models/add_broadcast_float16.model.cpp"
-} // namespace add_broadcast_float16
+#include "vts_models/add_v1_2.model.cpp"
+} // namespace add_v1_2
 
-TEST_F(NeuralnetworksHidlTest, add_broadcast_float16) {
+TEST_F(NeuralnetworksHidlTest, add_v1_2) {
   generated_tests::Execute(device,
-                           add_broadcast_float16::createTestModel,
-                           add_broadcast_float16::is_ignored,
-                           add_broadcast_float16::get_examples());
+                           add_v1_2::createTestModel,
+                           add_v1_2::is_ignored,
+                           add_v1_2::get_examples());
 }
 
-TEST_F(ValidationTest, add_broadcast_float16) {
-  const Model model = add_broadcast_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(add_broadcast_float16::get_examples());
+TEST_F(ValidationTest, add_v1_2) {
+  const Model model = add_v1_2::createTestModel();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
-TEST_F(DynamicOutputShapeTest, add_broadcast_float16_dynamic_output_shape) {
+TEST_F(DynamicOutputShapeTest, add_v1_2_dynamic_output_shape) {
   generated_tests::Execute(device,
-                           add_broadcast_float16::createTestModel_dynamic_output_shape,
-                           add_broadcast_float16::is_ignored_dynamic_output_shape,
-                           add_broadcast_float16::get_examples_dynamic_output_shape(), true);
+                           add_v1_2::createTestModel_dynamic_output_shape,
+                           add_v1_2::is_ignored_dynamic_output_shape,
+                           add_v1_2::get_examples_dynamic_output_shape(), true);
 }
 
-TEST_F(ValidationTest, add_broadcast_float16_dynamic_output_shape) {
-  const Model model = add_broadcast_float16::createTestModel_dynamic_output_shape();
-  const std::vector<Request> requests = createRequests(add_broadcast_float16::get_examples_dynamic_output_shape());
+TEST_F(ValidationTest, add_v1_2_dynamic_output_shape) {
+  const Model model = add_v1_2::createTestModel_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_dynamic_output_shape());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #endif
-// Generated from: add_float16.mod.py.
-namespace add_float16 {
-// Generated add_float16 test
-#include "examples/add_float16.example.cpp"
-// Generated model constructor
-#include "vts_models/add_float16.model.cpp"
-} // namespace add_float16
-
-TEST_F(NeuralnetworksHidlTest, add_float16) {
+TEST_F(NeuralnetworksHidlTest, add_v1_2_2) {
   generated_tests::Execute(device,
-                           add_float16::createTestModel,
-                           add_float16::is_ignored,
-                           add_float16::get_examples());
+                           add_v1_2::createTestModel_2,
+                           add_v1_2::is_ignored_2,
+                           add_v1_2::get_examples_2());
 }
 
-TEST_F(ValidationTest, add_float16) {
-  const Model model = add_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(add_float16::get_examples());
+TEST_F(ValidationTest, add_v1_2_2) {
+  const Model model = add_v1_2::createTestModel_2();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
-TEST_F(DynamicOutputShapeTest, add_float16_dynamic_output_shape) {
+TEST_F(DynamicOutputShapeTest, add_v1_2_dynamic_output_shape_2) {
   generated_tests::Execute(device,
-                           add_float16::createTestModel_dynamic_output_shape,
-                           add_float16::is_ignored_dynamic_output_shape,
-                           add_float16::get_examples_dynamic_output_shape(), true);
+                           add_v1_2::createTestModel_dynamic_output_shape_2,
+                           add_v1_2::is_ignored_dynamic_output_shape_2,
+                           add_v1_2::get_examples_dynamic_output_shape_2(), true);
 }
 
-TEST_F(ValidationTest, add_float16_dynamic_output_shape) {
-  const Model model = add_float16::createTestModel_dynamic_output_shape();
-  const std::vector<Request> requests = createRequests(add_float16::get_examples_dynamic_output_shape());
+TEST_F(ValidationTest, add_v1_2_dynamic_output_shape_2) {
+  const Model model = add_v1_2::createTestModel_dynamic_output_shape_2();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_dynamic_output_shape_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+TEST_F(NeuralnetworksHidlTest, add_v1_2_zero_sized) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized,
+                           add_v1_2::is_ignored_zero_sized,
+                           add_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized) {
+  const Model model = add_v1_2::createTestModel_zero_sized();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, add_v1_2_zero_sized_relaxed) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_relaxed,
+                           add_v1_2::is_ignored_zero_sized_relaxed,
+                           add_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_relaxed) {
+  const Model model = add_v1_2::createTestModel_zero_sized_relaxed();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, add_v1_2_zero_sized_quant8) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_quant8,
+                           add_v1_2::is_ignored_zero_sized_quant8,
+                           add_v1_2::get_examples_zero_sized_quant8());
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_quant8) {
+  const Model model = add_v1_2::createTestModel_zero_sized_quant8();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, add_v1_2_zero_sized_float16) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_float16,
+                           add_v1_2::is_ignored_zero_sized_float16,
+                           add_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_float16) {
+  const Model model = add_v1_2::createTestModel_zero_sized_float16();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_dynamic_output_shape,
+                           add_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+                           add_v1_2::get_examples_zero_sized_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_dynamic_output_shape) {
+  const Model model = add_v1_2::createTestModel_zero_sized_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_dynamic_output_shape());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed,
+                           add_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+                           add_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed(), true);
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  const Model model = add_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape_quant8) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_dynamic_output_shape_quant8,
+                           add_v1_2::is_ignored_zero_sized_dynamic_output_shape_quant8,
+                           add_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8(), true);
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_dynamic_output_shape_quant8) {
+  const Model model = add_v1_2::createTestModel_zero_sized_dynamic_output_shape_quant8();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape_float16) {
+  generated_tests::Execute(device,
+                           add_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16,
+                           add_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+                           add_v1_2::get_examples_zero_sized_dynamic_output_shape_float16(), true);
+}
+
+TEST_F(ValidationTest, add_v1_2_zero_sized_dynamic_output_shape_float16) {
+  const Model model = add_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16();
+  const std::vector<Request> requests = createRequests(add_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -18465,80 +18585,168 @@
 
 
 #endif
-// Generated from: div_broadcast_float16.mod.py.
-namespace div_broadcast_float16 {
-// Generated div_broadcast_float16 test
-#include "examples/div_broadcast_float16.example.cpp"
+// Generated from: div_v1_2.mod.py.
+namespace div_v1_2 {
+// Generated div_v1_2 test
+#include "examples/div_v1_2.example.cpp"
 // Generated model constructor
-#include "vts_models/div_broadcast_float16.model.cpp"
-} // namespace div_broadcast_float16
+#include "vts_models/div_v1_2.model.cpp"
+} // namespace div_v1_2
 
-TEST_F(NeuralnetworksHidlTest, div_broadcast_float16) {
+TEST_F(NeuralnetworksHidlTest, div_v1_2) {
   generated_tests::Execute(device,
-                           div_broadcast_float16::createTestModel,
-                           div_broadcast_float16::is_ignored,
-                           div_broadcast_float16::get_examples());
+                           div_v1_2::createTestModel,
+                           div_v1_2::is_ignored,
+                           div_v1_2::get_examples());
 }
 
-TEST_F(ValidationTest, div_broadcast_float16) {
-  const Model model = div_broadcast_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(div_broadcast_float16::get_examples());
+TEST_F(ValidationTest, div_v1_2) {
+  const Model model = div_v1_2::createTestModel();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
-TEST_F(DynamicOutputShapeTest, div_broadcast_float16_dynamic_output_shape) {
+TEST_F(DynamicOutputShapeTest, div_v1_2_dynamic_output_shape) {
   generated_tests::Execute(device,
-                           div_broadcast_float16::createTestModel_dynamic_output_shape,
-                           div_broadcast_float16::is_ignored_dynamic_output_shape,
-                           div_broadcast_float16::get_examples_dynamic_output_shape(), true);
+                           div_v1_2::createTestModel_dynamic_output_shape,
+                           div_v1_2::is_ignored_dynamic_output_shape,
+                           div_v1_2::get_examples_dynamic_output_shape(), true);
 }
 
-TEST_F(ValidationTest, div_broadcast_float16_dynamic_output_shape) {
-  const Model model = div_broadcast_float16::createTestModel_dynamic_output_shape();
-  const std::vector<Request> requests = createRequests(div_broadcast_float16::get_examples_dynamic_output_shape());
+TEST_F(ValidationTest, div_v1_2_dynamic_output_shape) {
+  const Model model = div_v1_2::createTestModel_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_dynamic_output_shape());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #endif
-// Generated from: div_float16.mod.py.
-namespace div_float16 {
-// Generated div_float16 test
-#include "examples/div_float16.example.cpp"
-// Generated model constructor
-#include "vts_models/div_float16.model.cpp"
-} // namespace div_float16
-
-TEST_F(NeuralnetworksHidlTest, div_float16) {
+TEST_F(NeuralnetworksHidlTest, div_v1_2_2) {
   generated_tests::Execute(device,
-                           div_float16::createTestModel,
-                           div_float16::is_ignored,
-                           div_float16::get_examples());
+                           div_v1_2::createTestModel_2,
+                           div_v1_2::is_ignored_2,
+                           div_v1_2::get_examples_2());
 }
 
-TEST_F(ValidationTest, div_float16) {
-  const Model model = div_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(div_float16::get_examples());
+TEST_F(ValidationTest, div_v1_2_2) {
+  const Model model = div_v1_2::createTestModel_2();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
-TEST_F(DynamicOutputShapeTest, div_float16_dynamic_output_shape) {
+TEST_F(DynamicOutputShapeTest, div_v1_2_dynamic_output_shape_2) {
   generated_tests::Execute(device,
-                           div_float16::createTestModel_dynamic_output_shape,
-                           div_float16::is_ignored_dynamic_output_shape,
-                           div_float16::get_examples_dynamic_output_shape(), true);
+                           div_v1_2::createTestModel_dynamic_output_shape_2,
+                           div_v1_2::is_ignored_dynamic_output_shape_2,
+                           div_v1_2::get_examples_dynamic_output_shape_2(), true);
 }
 
-TEST_F(ValidationTest, div_float16_dynamic_output_shape) {
-  const Model model = div_float16::createTestModel_dynamic_output_shape();
-  const std::vector<Request> requests = createRequests(div_float16::get_examples_dynamic_output_shape());
+TEST_F(ValidationTest, div_v1_2_dynamic_output_shape_2) {
+  const Model model = div_v1_2::createTestModel_dynamic_output_shape_2();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_dynamic_output_shape_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+TEST_F(NeuralnetworksHidlTest, div_v1_2_zero_sized) {
+  generated_tests::Execute(device,
+                           div_v1_2::createTestModel_zero_sized,
+                           div_v1_2::is_ignored_zero_sized,
+                           div_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(ValidationTest, div_v1_2_zero_sized) {
+  const Model model = div_v1_2::createTestModel_zero_sized();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_zero_sized());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, div_v1_2_zero_sized_relaxed) {
+  generated_tests::Execute(device,
+                           div_v1_2::createTestModel_zero_sized_relaxed,
+                           div_v1_2::is_ignored_zero_sized_relaxed,
+                           div_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(ValidationTest, div_v1_2_zero_sized_relaxed) {
+  const Model model = div_v1_2::createTestModel_zero_sized_relaxed();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_zero_sized_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, div_v1_2_zero_sized_float16) {
+  generated_tests::Execute(device,
+                           div_v1_2::createTestModel_zero_sized_float16,
+                           div_v1_2::is_ignored_zero_sized_float16,
+                           div_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(ValidationTest, div_v1_2_zero_sized_float16) {
+  const Model model = div_v1_2::createTestModel_zero_sized_float16();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_zero_sized_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, div_v1_2_zero_sized_dynamic_output_shape) {
+  generated_tests::Execute(device,
+                           div_v1_2::createTestModel_zero_sized_dynamic_output_shape,
+                           div_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+                           div_v1_2::get_examples_zero_sized_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, div_v1_2_zero_sized_dynamic_output_shape) {
+  const Model model = div_v1_2::createTestModel_zero_sized_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_zero_sized_dynamic_output_shape());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, div_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  generated_tests::Execute(device,
+                           div_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed,
+                           div_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+                           div_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed(), true);
+}
+
+TEST_F(ValidationTest, div_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  const Model model = div_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, div_v1_2_zero_sized_dynamic_output_shape_float16) {
+  generated_tests::Execute(device,
+                           div_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16,
+                           div_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+                           div_v1_2::get_examples_zero_sized_dynamic_output_shape_float16(), true);
+}
+
+TEST_F(ValidationTest, div_v1_2_zero_sized_dynamic_output_shape_float16) {
+  const Model model = div_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16();
+  const std::vector<Request> requests = createRequests(div_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -45233,80 +45441,200 @@
 
 
 #endif
-// Generated from: mul_broadcast_float16.mod.py.
-namespace mul_broadcast_float16 {
-// Generated mul_broadcast_float16 test
-#include "examples/mul_broadcast_float16.example.cpp"
+// Generated from: mul_v1_2.mod.py.
+namespace mul_v1_2 {
+// Generated mul_v1_2 test
+#include "examples/mul_v1_2.example.cpp"
 // Generated model constructor
-#include "vts_models/mul_broadcast_float16.model.cpp"
-} // namespace mul_broadcast_float16
+#include "vts_models/mul_v1_2.model.cpp"
+} // namespace mul_v1_2
 
-TEST_F(NeuralnetworksHidlTest, mul_broadcast_float16) {
+TEST_F(NeuralnetworksHidlTest, mul_v1_2) {
   generated_tests::Execute(device,
-                           mul_broadcast_float16::createTestModel,
-                           mul_broadcast_float16::is_ignored,
-                           mul_broadcast_float16::get_examples());
+                           mul_v1_2::createTestModel,
+                           mul_v1_2::is_ignored,
+                           mul_v1_2::get_examples());
 }
 
-TEST_F(ValidationTest, mul_broadcast_float16) {
-  const Model model = mul_broadcast_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_broadcast_float16::get_examples());
+TEST_F(ValidationTest, mul_v1_2) {
+  const Model model = mul_v1_2::createTestModel();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
-TEST_F(DynamicOutputShapeTest, mul_broadcast_float16_dynamic_output_shape) {
+TEST_F(DynamicOutputShapeTest, mul_v1_2_dynamic_output_shape) {
   generated_tests::Execute(device,
-                           mul_broadcast_float16::createTestModel_dynamic_output_shape,
-                           mul_broadcast_float16::is_ignored_dynamic_output_shape,
-                           mul_broadcast_float16::get_examples_dynamic_output_shape(), true);
+                           mul_v1_2::createTestModel_dynamic_output_shape,
+                           mul_v1_2::is_ignored_dynamic_output_shape,
+                           mul_v1_2::get_examples_dynamic_output_shape(), true);
 }
 
-TEST_F(ValidationTest, mul_broadcast_float16_dynamic_output_shape) {
-  const Model model = mul_broadcast_float16::createTestModel_dynamic_output_shape();
-  const std::vector<Request> requests = createRequests(mul_broadcast_float16::get_examples_dynamic_output_shape());
+TEST_F(ValidationTest, mul_v1_2_dynamic_output_shape) {
+  const Model model = mul_v1_2::createTestModel_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_dynamic_output_shape());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #endif
-// Generated from: mul_float16.mod.py.
-namespace mul_float16 {
-// Generated mul_float16 test
-#include "examples/mul_float16.example.cpp"
-// Generated model constructor
-#include "vts_models/mul_float16.model.cpp"
-} // namespace mul_float16
-
-TEST_F(NeuralnetworksHidlTest, mul_float16) {
+TEST_F(NeuralnetworksHidlTest, mul_v1_2_2) {
   generated_tests::Execute(device,
-                           mul_float16::createTestModel,
-                           mul_float16::is_ignored,
-                           mul_float16::get_examples());
+                           mul_v1_2::createTestModel_2,
+                           mul_v1_2::is_ignored_2,
+                           mul_v1_2::get_examples_2());
 }
 
-TEST_F(ValidationTest, mul_float16) {
-  const Model model = mul_float16::createTestModel();
-  const std::vector<Request> requests = createRequests(mul_float16::get_examples());
+TEST_F(ValidationTest, mul_v1_2_2) {
+  const Model model = mul_v1_2::createTestModel_2();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_2());
   validateModel(model);
   validateRequests(model, requests);
 }
 
 
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
-TEST_F(DynamicOutputShapeTest, mul_float16_dynamic_output_shape) {
+TEST_F(DynamicOutputShapeTest, mul_v1_2_dynamic_output_shape_2) {
   generated_tests::Execute(device,
-                           mul_float16::createTestModel_dynamic_output_shape,
-                           mul_float16::is_ignored_dynamic_output_shape,
-                           mul_float16::get_examples_dynamic_output_shape(), true);
+                           mul_v1_2::createTestModel_dynamic_output_shape_2,
+                           mul_v1_2::is_ignored_dynamic_output_shape_2,
+                           mul_v1_2::get_examples_dynamic_output_shape_2(), true);
 }
 
-TEST_F(ValidationTest, mul_float16_dynamic_output_shape) {
-  const Model model = mul_float16::createTestModel_dynamic_output_shape();
-  const std::vector<Request> requests = createRequests(mul_float16::get_examples_dynamic_output_shape());
+TEST_F(ValidationTest, mul_v1_2_dynamic_output_shape_2) {
+  const Model model = mul_v1_2::createTestModel_dynamic_output_shape_2();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_dynamic_output_shape_2());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+TEST_F(NeuralnetworksHidlTest, mul_v1_2_zero_sized) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized,
+                           mul_v1_2::is_ignored_zero_sized,
+                           mul_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized) {
+  const Model model = mul_v1_2::createTestModel_zero_sized();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, mul_v1_2_zero_sized_relaxed) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_relaxed,
+                           mul_v1_2::is_ignored_zero_sized_relaxed,
+                           mul_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_relaxed) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_relaxed();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, mul_v1_2_zero_sized_quant8) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_quant8,
+                           mul_v1_2::is_ignored_zero_sized_quant8,
+                           mul_v1_2::get_examples_zero_sized_quant8());
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_quant8) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_quant8();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, mul_v1_2_zero_sized_float16) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_float16,
+                           mul_v1_2::is_ignored_zero_sized_float16,
+                           mul_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_float16) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_float16();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_dynamic_output_shape,
+                           mul_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+                           mul_v1_2::get_examples_zero_sized_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_dynamic_output_shape) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_dynamic_output_shape());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed,
+                           mul_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+                           mul_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed(), true);
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape_quant8) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_dynamic_output_shape_quant8,
+                           mul_v1_2::is_ignored_zero_sized_dynamic_output_shape_quant8,
+                           mul_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8(), true);
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_dynamic_output_shape_quant8) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_dynamic_output_shape_quant8();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape_float16) {
+  generated_tests::Execute(device,
+                           mul_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16,
+                           mul_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+                           mul_v1_2::get_examples_zero_sized_dynamic_output_shape_float16(), true);
+}
+
+TEST_F(ValidationTest, mul_v1_2_zero_sized_dynamic_output_shape_float16) {
+  const Model model = mul_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16();
+  const std::vector<Request> requests = createRequests(mul_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
   validateModel(model);
   validateRequests(model, requests);
 }
@@ -70433,6 +70761,134 @@
 
 
 #endif
+TEST_F(NeuralnetworksHidlTest, sub_v1_2_zero_sized) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized,
+                           sub_v1_2::is_ignored_zero_sized,
+                           sub_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized) {
+  const Model model = sub_v1_2::createTestModel_zero_sized();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, sub_v1_2_zero_sized_relaxed) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_relaxed,
+                           sub_v1_2::is_ignored_zero_sized_relaxed,
+                           sub_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_relaxed) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_relaxed();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, sub_v1_2_zero_sized_quant8) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_quant8,
+                           sub_v1_2::is_ignored_zero_sized_quant8,
+                           sub_v1_2::get_examples_zero_sized_quant8());
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_quant8) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_quant8();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, sub_v1_2_zero_sized_float16) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_float16,
+                           sub_v1_2::is_ignored_zero_sized_float16,
+                           sub_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_float16) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_float16();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_dynamic_output_shape,
+                           sub_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+                           sub_v1_2::get_examples_zero_sized_dynamic_output_shape(), true);
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_dynamic_output_shape) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_dynamic_output_shape();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_dynamic_output_shape());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed,
+                           sub_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+                           sub_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed(), true);
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_dynamic_output_shape_relaxed();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape_quant8) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_dynamic_output_shape_quant8,
+                           sub_v1_2::is_ignored_zero_sized_dynamic_output_shape_quant8,
+                           sub_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8(), true);
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_dynamic_output_shape_quant8) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_dynamic_output_shape_quant8();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
+#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape_float16) {
+  generated_tests::Execute(device,
+                           sub_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16,
+                           sub_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+                           sub_v1_2::get_examples_zero_sized_dynamic_output_shape_float16(), true);
+}
+
+TEST_F(ValidationTest, sub_v1_2_zero_sized_dynamic_output_shape_float16) {
+  const Model model = sub_v1_2::createTestModel_zero_sized_dynamic_output_shape_float16();
+  const std::vector<Request> requests = createRequests(sub_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+#endif
 // Generated from: sub_v1_2_broadcast.mod.py.
 namespace sub_v1_2_broadcast {
 // Generated sub_v1_2_broadcast test
diff --git a/runtime/test/generated/examples/add_broadcast_float16.example.cpp b/runtime/test/generated/examples/add_broadcast_float16.example.cpp
deleted file mode 100644
index b543c4b..0000000
--- a/runtime/test/generated/examples/add_broadcast_float16.example.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// clang-format off
-// Generated file (from: add_broadcast_float16.mod.py). Do not edit
-std::vector<MixedTypedExample>& get_examples() {
-static std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {2.0f, 4.0f, 4.0f, 6.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples;
-};
-
-std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
-static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {2.0f, 4.0f, 4.0f, 6.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples_dynamic_output_shape;
-};
-
diff --git a/runtime/test/generated/examples/add_float16.example.cpp b/runtime/test/generated/examples/add_float16.example.cpp
deleted file mode 100644
index 656d922..0000000
--- a/runtime/test/generated/examples/add_float16.example.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// clang-format off
-// Generated file (from: add_float16.mod.py). Do not edit
-std::vector<MixedTypedExample>& get_examples() {
-static std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}, {1, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2e-23f, 0.0001f, 3.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 6.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples;
-};
-
-std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
-static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}, {1, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2e-23f, 0.0001f, 3.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 6.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples_dynamic_output_shape;
-};
-
diff --git a/runtime/test/generated/examples/add_v1_2.example.cpp b/runtime/test/generated/examples/add_v1_2.example.cpp
new file mode 100644
index 0000000..c266753
--- /dev/null
+++ b/runtime/test/generated/examples/add_v1_2.example.cpp
@@ -0,0 +1,686 @@
+// clang-format off
+// Generated file (from: add_v1_2.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}, {1, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2e-23f, 0.0001f, 3.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 6.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}, {1, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2e-23f, 0.0001f, 3.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 6.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {2.0f, 4.0f, 4.0f, 6.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_2() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {2.0f, 4.0f, 4.0f, 6.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized() {
+static std::vector<MixedTypedExample> examples_zero_sized = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_quant8() {
+static std::vector<MixedTypedExample> examples_zero_sized_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {138, 148}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {0}}, {2, {0}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_quant8() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {138, 148}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {0}}, {2, {0}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_float16;
+};
+
diff --git a/runtime/test/generated/examples/div_broadcast_float16.example.cpp b/runtime/test/generated/examples/div_broadcast_float16.example.cpp
deleted file mode 100644
index 8a266a7..0000000
--- a/runtime/test/generated/examples/div_broadcast_float16.example.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// clang-format off
-// Generated file (from: div_broadcast_float16.mod.py). Do not edit
-std::vector<MixedTypedExample>& get_examples() {
-static std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}, {1, {1, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}, {1, {1.0f, 2.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples;
-};
-
-std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
-static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}, {1, {1, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}, {1, {1.0f, 2.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples_dynamic_output_shape;
-};
-
diff --git a/runtime/test/generated/examples/div_float16.example.cpp b/runtime/test/generated/examples/div_float16.example.cpp
deleted file mode 100644
index e59d5b9..0000000
--- a/runtime/test/generated/examples/div_float16.example.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// clang-format off
-// Generated file (from: div_float16.mod.py). Do not edit
-std::vector<MixedTypedExample>& get_examples() {
-static std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}, {1, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}, {1, {2.0f, 0.0001f, 3.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples;
-};
-
-std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
-static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}, {1, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}, {1, {2.0f, 0.0001f, 3.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples_dynamic_output_shape;
-};
-
diff --git a/runtime/test/generated/examples/div_v1_2.example.cpp b/runtime/test/generated/examples/div_v1_2.example.cpp
new file mode 100644
index 0000000..9e6a085
--- /dev/null
+++ b/runtime/test/generated/examples/div_v1_2.example.cpp
@@ -0,0 +1,572 @@
+// clang-format off
+// Generated file (from: div_v1_2.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}, {1, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}, {1, {2.0f, 0.0001f, 3.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}, {1, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}, {1, {2.0f, 0.0001f, 3.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}, {1, {1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}, {1, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_2() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}, {1, {1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}, {1, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized() {
+static std::vector<MixedTypedExample> examples_zero_sized = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_float16;
+};
+
diff --git a/runtime/test/generated/examples/mul_broadcast_float16.example.cpp b/runtime/test/generated/examples/mul_broadcast_float16.example.cpp
deleted file mode 100644
index aec21ba..0000000
--- a/runtime/test/generated/examples/mul_broadcast_float16.example.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// clang-format off
-// Generated file (from: mul_broadcast_float16.mod.py). Do not edit
-std::vector<MixedTypedExample>& get_examples() {
-static std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples;
-};
-
-std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
-static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {2, 2}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples_dynamic_output_shape;
-};
-
diff --git a/runtime/test/generated/examples/mul_float16.example.cpp b/runtime/test/generated/examples/mul_float16.example.cpp
deleted file mode 100644
index 1307efa..0000000
--- a/runtime/test/generated/examples/mul_float16.example.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// clang-format off
-// Generated file (from: mul_float16.mod.py). Do not edit
-std::vector<MixedTypedExample>& get_examples() {
-static std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}, {1, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2.0f, 0.0001f, 3.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples;
-};
-
-std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
-static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
-// Begin of an example
-{
-.operands = {
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}, {1, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2.0f, 0.0001f, 3.5f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
-  // int -> Dimensions map
-  .operandDimensions = {{0, {3}}},
-  // int -> FLOAT32 map
-  .float32Operands = {},
-  // int -> INT32 map
-  .int32Operands = {},
-  // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {},
-  // int -> QUANT16_SYMM map
-  .quant16SymmOperands = {},
-  // int -> FLOAT16 map
-  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}},
-  // int -> BOOL8 map
-  .bool8Operands = {},
-  // int -> QUANT8_SYMM_PER_CHANNEL map
-  .quant8ChannelOperands = {},
-  // int -> QUANT16_ASYMM map
-  .quant16AsymmOperands = {},
-  // int -> QUANT8_SYMM map
-  .quant8SymmOperands = {},
-}
-},
-}, // End of an example
-};
-return examples_dynamic_output_shape;
-};
-
diff --git a/runtime/test/generated/examples/mul_v1_2.example.cpp b/runtime/test/generated/examples/mul_v1_2.example.cpp
new file mode 100644
index 0000000..0a3b1f7
--- /dev/null
+++ b/runtime/test/generated/examples/mul_v1_2.example.cpp
@@ -0,0 +1,686 @@
+// clang-format off
+// Generated file (from: mul_v1_2.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}, {1, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2.0f, 0.0001f, 3.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}, {1, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0009765625f, 1.0f, 2.5f}}, {1, {2.0f, 0.0001f, 3.5f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {3}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {2.001953125f, 0.0001000165f, 8.75f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_2() {
+static std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape_2() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 2}}, {1, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 4.0f, 3.0f, 8.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape_2;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized() {
+static std::vector<MixedTypedExample> examples_zero_sized = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_quant8() {
+static std::vector<MixedTypedExample> examples_zero_sized_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {138, 148}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {0}}, {2, {0}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_quant8() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {138, 148}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {0}}, {2, {0}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_float16;
+};
+
diff --git a/runtime/test/generated/examples/sub_v1_2.example.cpp b/runtime/test/generated/examples/sub_v1_2.example.cpp
index 38032e5..7ddb1c3 100644
--- a/runtime/test/generated/examples/sub_v1_2.example.cpp
+++ b/runtime/test/generated/examples/sub_v1_2.example.cpp
@@ -1026,3 +1026,459 @@
 return examples_quant8_dynamic_output_shape;
 };
 
+std::vector<MixedTypedExample>& get_examples_zero_sized() {
+static std::vector<MixedTypedExample> examples_zero_sized = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_quant8() {
+static std::vector<MixedTypedExample> examples_zero_sized_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {138, 148}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {0}}, {2, {0}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_float16;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_relaxed() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {1.0f, 2.0f}}},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_relaxed;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_quant8() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_quant8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {138, 148}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {{0, {0}}, {2, {0}}},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_quant8;
+};
+
+std::vector<MixedTypedExample>& get_examples_zero_sized_dynamic_output_shape_float16() {
+static std::vector<MixedTypedExample> examples_zero_sized_dynamic_output_shape_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {1, 1, 1, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {1.0f, 2.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> Dimensions map
+  .operandDimensions = {{0, {0}}, {1, {0}}, {2, {0, 2, 2, 2}}},
+  // int -> FLOAT32 map
+  .float32Operands = {},
+  // int -> INT32 map
+  .int32Operands = {{1, {0}}},
+  // int -> QUANT8_ASYMM map
+  .quant8AsymmOperands = {},
+  // int -> QUANT16_SYMM map
+  .quant16SymmOperands = {},
+  // int -> FLOAT16 map
+  .float16Operands = {{0, {0.0f}}, {2, {0.0f}}},
+  // int -> BOOL8 map
+  .bool8Operands = {},
+  // int -> QUANT8_SYMM_PER_CHANNEL map
+  .quant8ChannelOperands = {},
+  // int -> QUANT16_ASYMM map
+  .quant16AsymmOperands = {},
+  // int -> QUANT8_SYMM map
+  .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_zero_sized_dynamic_output_shape_float16;
+};
+
diff --git a/runtime/test/generated/models/add_broadcast_float16.model.cpp b/runtime/test/generated/models/add_broadcast_float16.model.cpp
deleted file mode 100644
index c63254d..0000000
--- a/runtime/test/generated/models/add_broadcast_float16.model.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// clang-format off
-// Generated file (from: add_broadcast_float16.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {1, 2});
-  OperandType type1(Type::TENSOR_FLOAT16, {2, 2});
-  OperandType type2(Type::INT32, {});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type1);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {1, 2});
-  OperandType type1(Type::TENSOR_FLOAT16, {2, 2});
-  OperandType type2(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {0, 0});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type3);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/models/add_float16.model.cpp b/runtime/test/generated/models/add_float16.model.cpp
deleted file mode 100644
index 867caef..0000000
--- a/runtime/test/generated/models/add_float16.model.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// clang-format off
-// Generated file (from: add_float16.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {3});
-  OperandType type1(Type::INT32, {});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto act = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type0);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {3});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_FLOAT16, {0});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto act = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/models/add_v1_2.model.cpp b/runtime/test/generated/models/add_v1_2.model.cpp
new file mode 100644
index 0000000..5ff25c2
--- /dev/null
+++ b/runtime/test/generated/models/add_v1_2.model.cpp
@@ -0,0 +1,766 @@
+// clang-format off
+// Generated file (from: add_v1_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {3});
+  OperandType type1(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {3});
+  OperandType type1(Type::INT32, {});
+  OperandType type15(Type::TENSOR_FLOAT16, {0});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type2);
+  auto op21 = model->addOperand(&type3);
+  auto act1 = model->addOperand(&type1);
+  auto op31 = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t act1_init[] = {0};
+  model->setOperandValue(act1, act1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ADD, {op11, op21, act1}, {op31});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21},
+    {op31});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_2(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type16(Type::TENSOR_FLOAT16, {0, 0});
+  OperandType type2(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type2);
+  auto op21 = model->addOperand(&type3);
+  auto act1 = model->addOperand(&type1);
+  auto op31 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t act1_init[] = {0};
+  model->setOperandValue(act1, act1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_ADD, {op11, op21, act1}, {op31});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21},
+    {op31});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type13);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_relaxed(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type13);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 2}, 0.1f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 0.1f, 128);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.1f, 128);
+  OperandType type20(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
+  OperandType type21(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type22);
+  auto roi = model->addOperand(&type20);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type23);
+  auto roiOut = model->addOperand(&type21);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type17);
+  // Phase 2, operations
+  static uint8_t scores_init[] = {137, 129};
+  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
+  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
+  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static uint8_t op_init[] = {138, 148, 158, 168};
+  model->setOperandValue(op, op_init, sizeof(uint8_t) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type24(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type26(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type27(Type::FLOAT16, {});
+  OperandType type28(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type29(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type30(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type31(Type::TENSOR_FLOAT16, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type30);
+  auto roi = model->addOperand(&type28);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type27);
+  auto param2 = model->addOperand(&type27);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type31);
+  auto roiOut = model->addOperand(&type29);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type25);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type27);
+  auto param7 = model->addOperand(&type27);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type24);
+  auto op = model->addOperand(&type26);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type24);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static _Float16 param1_init[] = {0.30000001192092896f};
+  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+  static _Float16 param2_init[] = {0.4000000059604645f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static _Float16 param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type32);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_relaxed(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type32);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 2}, 0.1f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 0.1f, 128);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.1f, 128);
+  OperandType type20(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
+  OperandType type21(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
+  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type22);
+  auto roi = model->addOperand(&type20);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type23);
+  auto roiOut = model->addOperand(&type21);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type33);
+  // Phase 2, operations
+  static uint8_t scores_init[] = {137, 129};
+  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
+  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
+  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static uint8_t op_init[] = {138, 148, 158, 168};
+  model->setOperandValue(op, op_init, sizeof(uint8_t) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_FLOAT16, {0});
+  OperandType type24(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type26(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type27(Type::FLOAT16, {});
+  OperandType type28(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type29(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type30(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type34(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type30);
+  auto roi = model->addOperand(&type28);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type27);
+  auto param2 = model->addOperand(&type27);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type15);
+  auto roiOut = model->addOperand(&type29);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type25);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type27);
+  auto param7 = model->addOperand(&type27);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type24);
+  auto op = model->addOperand(&type26);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type34);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static _Float16 param1_init[] = {0.30000001192092896f};
+  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+  static _Float16 param2_init[] = {0.4000000059604645f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static _Float16 param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_ADD, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/models/div_broadcast_float16.model.cpp b/runtime/test/generated/models/div_broadcast_float16.model.cpp
deleted file mode 100644
index e30c994..0000000
--- a/runtime/test/generated/models/div_broadcast_float16.model.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// clang-format off
-// Generated file (from: div_broadcast_float16.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {2, 2});
-  OperandType type1(Type::TENSOR_FLOAT16, {1, 2});
-  OperandType type2(Type::INT32, {});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type0);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {2, 2});
-  OperandType type1(Type::TENSOR_FLOAT16, {1, 2});
-  OperandType type2(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {0, 0});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type3);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/models/div_float16.model.cpp b/runtime/test/generated/models/div_float16.model.cpp
deleted file mode 100644
index 609045d..0000000
--- a/runtime/test/generated/models/div_float16.model.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// clang-format off
-// Generated file (from: div_float16.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {3});
-  OperandType type1(Type::INT32, {});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto act = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type0);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {3});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_FLOAT16, {0});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto act = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/models/div_v1_2.model.cpp b/runtime/test/generated/models/div_v1_2.model.cpp
new file mode 100644
index 0000000..5436469
--- /dev/null
+++ b/runtime/test/generated/models/div_v1_2.model.cpp
@@ -0,0 +1,601 @@
+// clang-format off
+// Generated file (from: div_v1_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {3});
+  OperandType type1(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {3});
+  OperandType type1(Type::INT32, {});
+  OperandType type15(Type::TENSOR_FLOAT16, {0});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT16, {2, 2});
+  OperandType type3(Type::TENSOR_FLOAT16, {1, 2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type2);
+  auto op21 = model->addOperand(&type3);
+  auto act1 = model->addOperand(&type1);
+  auto op31 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t act1_init[] = {0};
+  model->setOperandValue(act1, act1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DIV, {op11, op21, act1}, {op31});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21},
+    {op31});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_2(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type16(Type::TENSOR_FLOAT16, {0, 0});
+  OperandType type2(Type::TENSOR_FLOAT16, {2, 2});
+  OperandType type3(Type::TENSOR_FLOAT16, {1, 2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type2);
+  auto op21 = model->addOperand(&type3);
+  auto act1 = model->addOperand(&type1);
+  auto op31 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t act1_init[] = {0};
+  model->setOperandValue(act1, act1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DIV, {op11, op21, act1}, {op31});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21},
+    {op31});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type13);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_DIV, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_relaxed(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type13);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_DIV, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type19(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type20(Type::FLOAT16, {});
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type22(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type23(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type24(Type::TENSOR_FLOAT16, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type23);
+  auto roi = model->addOperand(&type21);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type20);
+  auto param2 = model->addOperand(&type20);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type24);
+  auto roiOut = model->addOperand(&type22);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type20);
+  auto param7 = model->addOperand(&type20);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type17);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static _Float16 param1_init[] = {0.30000001192092896f};
+  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+  static _Float16 param2_init[] = {0.4000000059604645f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static _Float16 param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_DIV, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type25(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type25);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_DIV, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_relaxed(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type25(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type25);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_DIV, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_FLOAT16, {0});
+  OperandType type17(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type18(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type19(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type20(Type::FLOAT16, {});
+  OperandType type21(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type22(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type23(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type26(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type23);
+  auto roi = model->addOperand(&type21);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type20);
+  auto param2 = model->addOperand(&type20);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type15);
+  auto roiOut = model->addOperand(&type22);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type20);
+  auto param7 = model->addOperand(&type20);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type26);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static _Float16 param1_init[] = {0.30000001192092896f};
+  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+  static _Float16 param2_init[] = {0.4000000059604645f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static _Float16 param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_DIV, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/models/mul_broadcast_float16.model.cpp b/runtime/test/generated/models/mul_broadcast_float16.model.cpp
deleted file mode 100644
index e421bff..0000000
--- a/runtime/test/generated/models/mul_broadcast_float16.model.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// clang-format off
-// Generated file (from: mul_broadcast_float16.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {1, 2});
-  OperandType type1(Type::TENSOR_FLOAT16, {2, 2});
-  OperandType type2(Type::INT32, {});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type1);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {1, 2});
-  OperandType type1(Type::TENSOR_FLOAT16, {2, 2});
-  OperandType type2(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {0, 0});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type1);
-  auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type3);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/models/mul_float16.model.cpp b/runtime/test/generated/models/mul_float16.model.cpp
deleted file mode 100644
index 71de0de..0000000
--- a/runtime/test/generated/models/mul_float16.model.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// clang-format off
-// Generated file (from: mul_float16.mod.py). Do not edit
-void CreateModel(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {3});
-  OperandType type1(Type::INT32, {});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto act = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type0);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type0(Type::TENSOR_FLOAT16, {3});
-  OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_FLOAT16, {0});
-  // Phase 1, operands
-  auto op1 = model->addOperand(&type0);
-  auto op2 = model->addOperand(&type0);
-  auto act = model->addOperand(&type1);
-  auto op3 = model->addOperand(&type2);
-  // Phase 2, operations
-  static int32_t act_init[] = {0};
-  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
-  // Phase 3, inputs and outputs
-  model->identifyInputsAndOutputs(
-    {op1, op2},
-    {op3});
-  assert(model->isValid());
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/models/mul_v1_2.model.cpp b/runtime/test/generated/models/mul_v1_2.model.cpp
new file mode 100644
index 0000000..254cf8b
--- /dev/null
+++ b/runtime/test/generated/models/mul_v1_2.model.cpp
@@ -0,0 +1,766 @@
+// clang-format off
+// Generated file (from: mul_v1_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {3});
+  OperandType type1(Type::INT32, {});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT16, {3});
+  OperandType type1(Type::INT32, {});
+  OperandType type15(Type::TENSOR_FLOAT16, {0});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type2);
+  auto op21 = model->addOperand(&type3);
+  auto act1 = model->addOperand(&type1);
+  auto op31 = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t act1_init[] = {0};
+  model->setOperandValue(act1, act1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MUL, {op11, op21, act1}, {op31});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21},
+    {op31});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape_2(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type16(Type::TENSOR_FLOAT16, {0, 0});
+  OperandType type2(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type2);
+  auto op21 = model->addOperand(&type3);
+  auto act1 = model->addOperand(&type1);
+  auto op31 = model->addOperand(&type16);
+  // Phase 2, operations
+  static int32_t act1_init[] = {0};
+  model->setOperandValue(act1, act1_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MUL, {op11, op21, act1}, {op31});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11, op21},
+    {op31});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type13);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_relaxed(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type13);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 2}, 0.1f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 0.1f, 128);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.1f, 128);
+  OperandType type20(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
+  OperandType type21(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type22);
+  auto roi = model->addOperand(&type20);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type23);
+  auto roiOut = model->addOperand(&type21);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type17);
+  // Phase 2, operations
+  static uint8_t scores_init[] = {137, 129};
+  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
+  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
+  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static uint8_t op_init[] = {138, 148, 158, 168};
+  model->setOperandValue(op, op_init, sizeof(uint8_t) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type24(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type26(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type27(Type::FLOAT16, {});
+  OperandType type28(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type29(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type30(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type31(Type::TENSOR_FLOAT16, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type30);
+  auto roi = model->addOperand(&type28);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type27);
+  auto param2 = model->addOperand(&type27);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type31);
+  auto roiOut = model->addOperand(&type29);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type25);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type27);
+  auto param7 = model->addOperand(&type27);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type24);
+  auto op = model->addOperand(&type26);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type24);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static _Float16 param1_init[] = {0.30000001192092896f};
+  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+  static _Float16 param2_init[] = {0.4000000059604645f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static _Float16 param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type32);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_relaxed(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type12(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type6(Type::TENSOR_FLOAT32, {0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type4);
+  auto roi = model->addOperand(&type5);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type6);
+  auto roiOut = model->addOperand(&type8);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type12);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type13);
+  auto op = model->addOperand(&type14);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type32);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::FLOAT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 2}, 0.1f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 0.1f, 128);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.1f, 128);
+  OperandType type20(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
+  OperandType type21(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
+  OperandType type33(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type22);
+  auto roi = model->addOperand(&type20);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type10);
+  auto param2 = model->addOperand(&type10);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type23);
+  auto roiOut = model->addOperand(&type21);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type18);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type10);
+  auto param7 = model->addOperand(&type10);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type33);
+  // Phase 2, operations
+  static uint8_t scores_init[] = {137, 129};
+  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
+  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
+  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static float param1_init[] = {0.3f};
+  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
+  static float param2_init[] = {0.4f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static float param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static uint8_t op_init[] = {138, 148, 158, 168};
+  model->setOperandValue(op, op_init, sizeof(uint8_t) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type11(Type::BOOL, {});
+  OperandType type15(Type::TENSOR_FLOAT16, {0});
+  OperandType type24(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type26(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type27(Type::FLOAT16, {});
+  OperandType type28(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type29(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type30(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type34(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type7(Type::TENSOR_INT32, {0});
+  OperandType type9(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type30);
+  auto roi = model->addOperand(&type28);
+  auto param = model->addOperand(&type9);
+  auto param1 = model->addOperand(&type27);
+  auto param2 = model->addOperand(&type27);
+  auto param3 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type15);
+  auto roiOut = model->addOperand(&type29);
+  auto classesOut = model->addOperand(&type7);
+  auto batchSplitOut = model->addOperand(&type7);
+  auto in = model->addOperand(&type25);
+  auto param4 = model->addOperand(&type1);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type27);
+  auto param7 = model->addOperand(&type27);
+  auto param8 = model->addOperand(&type1);
+  auto param9 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type11);
+  auto featureMap = model->addOperand(&type24);
+  auto op = model->addOperand(&type26);
+  auto param10 = model->addOperand(&type1);
+  auto out = model->addOperand(&type34);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param_init[] = {0};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
+  static _Float16 param1_init[] = {0.30000001192092896f};
+  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
+  static _Float16 param2_init[] = {0.4000000059604645f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static int32_t param3_init[] = {-1};
+  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
+  static int32_t param4_init[] = {2};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static _Float16 param6_init[] = {2.0f};
+  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static int32_t param8_init[] = {4};
+  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param10_init[] = {0};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param4, param5, param6, param7, param8, param9, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_MUL, {featureMap, op, param10}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/models/sub_v1_2.model.cpp b/runtime/test/generated/models/sub_v1_2.model.cpp
index 30e599a..323e1b1 100644
--- a/runtime/test/generated/models/sub_v1_2.model.cpp
+++ b/runtime/test/generated/models/sub_v1_2.model.cpp
@@ -98,12 +98,12 @@
 
 void CreateModel_float16_none(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type13);
   // Phase 2, operations
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -122,12 +122,12 @@
 
 void CreateModel_float16_relu(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type13);
   // Phase 2, operations
   static int32_t act_init[] = {1};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -146,12 +146,12 @@
 
 void CreateModel_float16_relu1(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type13);
   // Phase 2, operations
   static int32_t act_init[] = {2};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -170,12 +170,12 @@
 
 void CreateModel_float16_relu6(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type3);
+  auto output0 = model->addOperand(&type13);
   // Phase 2, operations
   static int32_t act_init[] = {3};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -195,12 +195,12 @@
 void CreateModel_dynamic_output_shape_none(Model *model) {
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
   OperandType type1(Type::INT32, {});
-  OperandType type4(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type14(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
   // Phase 1, operands
   auto input0 = model->addOperand(&type0);
   auto input1 = model->addOperand(&type0);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type4);
+  auto output0 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -220,12 +220,12 @@
 void CreateModel_dynamic_output_shape_relu(Model *model) {
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
   OperandType type1(Type::INT32, {});
-  OperandType type4(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type14(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
   // Phase 1, operands
   auto input0 = model->addOperand(&type0);
   auto input1 = model->addOperand(&type0);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type4);
+  auto output0 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t act_init[] = {1};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -245,12 +245,12 @@
 void CreateModel_dynamic_output_shape_relu1(Model *model) {
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
   OperandType type1(Type::INT32, {});
-  OperandType type4(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type14(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
   // Phase 1, operands
   auto input0 = model->addOperand(&type0);
   auto input1 = model->addOperand(&type0);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type4);
+  auto output0 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t act_init[] = {2};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -270,12 +270,12 @@
 void CreateModel_dynamic_output_shape_relu6(Model *model) {
   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
   OperandType type1(Type::INT32, {});
-  OperandType type4(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type14(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
   // Phase 1, operands
   auto input0 = model->addOperand(&type0);
   auto input1 = model->addOperand(&type0);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type4);
+  auto output0 = model->addOperand(&type14);
   // Phase 2, operations
   static int32_t act_init[] = {3};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -294,13 +294,13 @@
 
 void CreateModel_dynamic_output_shape_float16_none(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
-  OperandType type5(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type15(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type5);
+  auto output0 = model->addOperand(&type15);
   // Phase 2, operations
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -319,13 +319,13 @@
 
 void CreateModel_dynamic_output_shape_float16_relu(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
-  OperandType type5(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type15(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type5);
+  auto output0 = model->addOperand(&type15);
   // Phase 2, operations
   static int32_t act_init[] = {1};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -344,13 +344,13 @@
 
 void CreateModel_dynamic_output_shape_float16_relu1(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
-  OperandType type5(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type15(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type5);
+  auto output0 = model->addOperand(&type15);
   // Phase 2, operations
   static int32_t act_init[] = {2};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -369,13 +369,13 @@
 
 void CreateModel_dynamic_output_shape_float16_relu6(Model *model) {
   OperandType type1(Type::INT32, {});
-  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
-  OperandType type5(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type15(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
   // Phase 1, operands
-  auto input0 = model->addOperand(&type3);
-  auto input1 = model->addOperand(&type3);
+  auto input0 = model->addOperand(&type13);
+  auto input1 = model->addOperand(&type13);
   auto act = model->addOperand(&type1);
-  auto output0 = model->addOperand(&type5);
+  auto output0 = model->addOperand(&type15);
   // Phase 2, operations
   static int32_t act_init[] = {3};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
@@ -418,13 +418,13 @@
 
 void CreateModel_quant8_dynamic_output_shape(Model *model) {
   OperandType type1(Type::INT32, {});
+  OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 0);
   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 4, 16, 2}, 0.5f, 0);
-  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 0);
   // Phase 1, operands
   auto input01 = model->addOperand(&type2);
   auto input11 = model->addOperand(&type2);
   auto param = model->addOperand(&type1);
-  auto output01 = model->addOperand(&type6);
+  auto output01 = model->addOperand(&type16);
   // Phase 2, operations
   static int32_t param_init[] = {0};
   model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
@@ -441,3 +441,667 @@
   return ignore.find(i) != ignore.end();
 }
 
+void CreateModel_zero_sized(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type5(Type::TENSOR_FLOAT32, {0});
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type7(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  OperandType type9(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type3);
+  auto roi = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type9);
+  auto param3 = model->addOperand(&type9);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type5);
+  auto roiOut = model->addOperand(&type7);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type11);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type9);
+  auto param8 = model->addOperand(&type9);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type12);
+  auto op = model->addOperand(&type0);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type12);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static float param2_init[] = {0.3f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static float param3_init[] = {0.4f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static float param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(float) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type5(Type::TENSOR_FLOAT32, {0});
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type7(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  OperandType type9(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type3);
+  auto roi = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type9);
+  auto param3 = model->addOperand(&type9);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type5);
+  auto roiOut = model->addOperand(&type7);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type11);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type9);
+  auto param8 = model->addOperand(&type9);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type12);
+  auto op = model->addOperand(&type0);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type12);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static float param2_init[] = {0.3f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static float param3_init[] = {0.4f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static float param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(float) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 2}, 0.1f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 0.1f, 128);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.1f, 128);
+  OperandType type20(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
+  OperandType type21(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  OperandType type9(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type22);
+  auto roi = model->addOperand(&type20);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type9);
+  auto param3 = model->addOperand(&type9);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type23);
+  auto roiOut = model->addOperand(&type21);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type18);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type9);
+  auto param8 = model->addOperand(&type9);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type17);
+  // Phase 2, operations
+  static uint8_t scores_init[] = {137, 129};
+  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
+  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
+  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static float param2_init[] = {0.3f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static float param3_init[] = {0.4f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static float param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(float) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static uint8_t op_init[] = {138, 148, 158, 168};
+  model->setOperandValue(op, op_init, sizeof(uint8_t) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type24(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type26(Type::FLOAT16, {});
+  OperandType type27(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type28(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type29(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type30(Type::TENSOR_FLOAT16, {0});
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type29);
+  auto roi = model->addOperand(&type27);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type26);
+  auto param3 = model->addOperand(&type26);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type30);
+  auto roiOut = model->addOperand(&type28);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type25);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type26);
+  auto param8 = model->addOperand(&type26);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type24);
+  auto op = model->addOperand(&type13);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type24);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static _Float16 param2_init[] = {0.30000001192092896f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static _Float16 param3_init[] = {0.4000000059604645f};
+  model->setOperandValue(param3, param3_init, sizeof(_Float16) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static _Float16 param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(_Float16) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type5(Type::TENSOR_FLOAT32, {0});
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type7(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  OperandType type9(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type3);
+  auto roi = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type9);
+  auto param3 = model->addOperand(&type9);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type5);
+  auto roiOut = model->addOperand(&type7);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type11);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type9);
+  auto param8 = model->addOperand(&type9);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type12);
+  auto op = model->addOperand(&type0);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type14);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static float param2_init[] = {0.3f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static float param3_init[] = {0.4f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static float param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(float) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+  OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 2});
+  OperandType type14(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type4(Type::TENSOR_FLOAT32, {1, 8});
+  OperandType type5(Type::TENSOR_FLOAT32, {0});
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type7(Type::TENSOR_FLOAT32, {0, 4});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  OperandType type9(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type3);
+  auto roi = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type9);
+  auto param3 = model->addOperand(&type9);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type5);
+  auto roiOut = model->addOperand(&type7);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type11);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type9);
+  auto param8 = model->addOperand(&type9);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type12);
+  auto op = model->addOperand(&type0);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type14);
+  // Phase 2, operations
+  static float scores_init[] = {0.9f, 0.1f};
+  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
+  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static float param2_init[] = {0.3f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static float param3_init[] = {0.4f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static float param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(float) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static float op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(float) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_quant8(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 2}, 0.1f, 128);
+  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 0.1f, 128);
+  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.1f, 128);
+  OperandType type20(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
+  OperandType type21(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
+  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
+  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
+  OperandType type31(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  OperandType type9(Type::FLOAT32, {});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type22);
+  auto roi = model->addOperand(&type20);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type9);
+  auto param3 = model->addOperand(&type9);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type23);
+  auto roiOut = model->addOperand(&type21);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type18);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type9);
+  auto param8 = model->addOperand(&type9);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type17);
+  auto op = model->addOperand(&type19);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type31);
+  // Phase 2, operations
+  static uint8_t scores_init[] = {137, 129};
+  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
+  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
+  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static float param2_init[] = {0.3f};
+  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
+  static float param3_init[] = {0.4f};
+  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static float param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(float) * 1);
+  static float param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(float) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static uint8_t op_init[] = {138, 148, 158, 168};
+  model->setOperandValue(op, op_init, sizeof(uint8_t) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_zero_sized_dynamic_output_shape_float16(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type10(Type::BOOL, {});
+  OperandType type13(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
+  OperandType type15(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
+  OperandType type24(Type::TENSOR_FLOAT16, {0, 2, 2, 2});
+  OperandType type25(Type::TENSOR_FLOAT16, {1, 1, 1, 2});
+  OperandType type26(Type::FLOAT16, {});
+  OperandType type27(Type::TENSOR_FLOAT16, {1, 8});
+  OperandType type28(Type::TENSOR_FLOAT16, {0, 4});
+  OperandType type29(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type32(Type::TENSOR_FLOAT16, {0});
+  OperandType type6(Type::TENSOR_INT32, {0});
+  OperandType type8(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto scores = model->addOperand(&type29);
+  auto roi = model->addOperand(&type27);
+  auto param1 = model->addOperand(&type8);
+  auto param2 = model->addOperand(&type26);
+  auto param3 = model->addOperand(&type26);
+  auto param4 = model->addOperand(&type1);
+  auto scoresOut = model->addOperand(&type32);
+  auto roiOut = model->addOperand(&type28);
+  auto classesOut = model->addOperand(&type6);
+  auto batchSplitOut = model->addOperand(&type6);
+  auto in = model->addOperand(&type25);
+  auto param5 = model->addOperand(&type1);
+  auto param6 = model->addOperand(&type1);
+  auto param7 = model->addOperand(&type26);
+  auto param8 = model->addOperand(&type26);
+  auto param9 = model->addOperand(&type1);
+  auto param10 = model->addOperand(&type1);
+  auto layout = model->addOperand(&type10);
+  auto featureMap = model->addOperand(&type24);
+  auto op = model->addOperand(&type13);
+  auto param11 = model->addOperand(&type1);
+  auto out = model->addOperand(&type15);
+  // Phase 2, operations
+  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
+  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
+  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
+  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
+  static int32_t param1_init[] = {0};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
+  static _Float16 param2_init[] = {0.30000001192092896f};
+  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
+  static _Float16 param3_init[] = {0.4000000059604645f};
+  model->setOperandValue(param3, param3_init, sizeof(_Float16) * 1);
+  static int32_t param4_init[] = {-1};
+  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
+  static int32_t param5_init[] = {2};
+  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
+  static int32_t param6_init[] = {2};
+  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
+  static _Float16 param7_init[] = {2.0f};
+  model->setOperandValue(param7, param7_init, sizeof(_Float16) * 1);
+  static _Float16 param8_init[] = {2.0f};
+  model->setOperandValue(param8, param8_init, sizeof(_Float16) * 1);
+  static int32_t param9_init[] = {4};
+  model->setOperandValue(param9, param9_init, sizeof(int32_t) * 1);
+  static int32_t param10_init[] = {4};
+  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
+  static bool8 layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
+  static _Float16 op_init[] = {1.0f, 2.0f, 3.0f, 4.0f};
+  model->setOperandValue(op, op_init, sizeof(_Float16) * 4);
+  static int32_t param11_init[] = {0};
+  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param1, param2, param3, param4}, {scoresOut, roiOut, classesOut, batchSplitOut});
+  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param5, param6, param7, param8, param9, param10, layout}, {featureMap});
+  model->addOperation(ANEURALNETWORKS_SUB, {featureMap, op, param11}, {out});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {in},
+    {scoresOut, classesOut, out});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp b/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp
deleted file mode 100644
index 11a75ed..0000000
--- a/runtime/test/generated/tests/add_broadcast_float16.mod.py.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// clang-format off
-// Generated file (from: add_broadcast_float16.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace add_broadcast_float16 {
-// Generated add_broadcast_float16 test
-#include "generated/examples/add_broadcast_float16.example.cpp"
-// Generated model constructor
-#include "generated/models/add_broadcast_float16.model.cpp"
-} // namespace add_broadcast_float16
-
-TEST_F(GeneratedTests, add_broadcast_float16) {
-    execute(add_broadcast_float16::CreateModel,
-            add_broadcast_float16::is_ignored,
-            add_broadcast_float16::get_examples());
-}
-
-TEST_F(DynamicOutputShapeTest, add_broadcast_float16_dynamic_output_shape) {
-    execute(add_broadcast_float16::CreateModel_dynamic_output_shape,
-            add_broadcast_float16::is_ignored_dynamic_output_shape,
-            add_broadcast_float16::get_examples_dynamic_output_shape());
-}
-
diff --git a/runtime/test/generated/tests/add_float16.mod.py.cpp b/runtime/test/generated/tests/add_float16.mod.py.cpp
deleted file mode 100644
index ca34965..0000000
--- a/runtime/test/generated/tests/add_float16.mod.py.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// clang-format off
-// Generated file (from: add_float16.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace add_float16 {
-// Generated add_float16 test
-#include "generated/examples/add_float16.example.cpp"
-// Generated model constructor
-#include "generated/models/add_float16.model.cpp"
-} // namespace add_float16
-
-TEST_F(GeneratedTests, add_float16) {
-    execute(add_float16::CreateModel,
-            add_float16::is_ignored,
-            add_float16::get_examples());
-}
-
-TEST_F(DynamicOutputShapeTest, add_float16_dynamic_output_shape) {
-    execute(add_float16::CreateModel_dynamic_output_shape,
-            add_float16::is_ignored_dynamic_output_shape,
-            add_float16::get_examples_dynamic_output_shape());
-}
-
diff --git a/runtime/test/generated/tests/add_v1_2.mod.py.cpp b/runtime/test/generated/tests/add_v1_2.mod.py.cpp
new file mode 100644
index 0000000..4c06e86
--- /dev/null
+++ b/runtime/test/generated/tests/add_v1_2.mod.py.cpp
@@ -0,0 +1,83 @@
+// clang-format off
+// Generated file (from: add_v1_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace add_v1_2 {
+// Generated add_v1_2 test
+#include "generated/examples/add_v1_2.example.cpp"
+// Generated model constructor
+#include "generated/models/add_v1_2.model.cpp"
+} // namespace add_v1_2
+
+TEST_F(GeneratedTests, add_v1_2) {
+    execute(add_v1_2::CreateModel,
+            add_v1_2::is_ignored,
+            add_v1_2::get_examples());
+}
+
+TEST_F(DynamicOutputShapeTest, add_v1_2_dynamic_output_shape) {
+    execute(add_v1_2::CreateModel_dynamic_output_shape,
+            add_v1_2::is_ignored_dynamic_output_shape,
+            add_v1_2::get_examples_dynamic_output_shape());
+}
+
+TEST_F(GeneratedTests, add_v1_2_2) {
+    execute(add_v1_2::CreateModel_2,
+            add_v1_2::is_ignored_2,
+            add_v1_2::get_examples_2());
+}
+
+TEST_F(DynamicOutputShapeTest, add_v1_2_dynamic_output_shape_2) {
+    execute(add_v1_2::CreateModel_dynamic_output_shape_2,
+            add_v1_2::is_ignored_dynamic_output_shape_2,
+            add_v1_2::get_examples_dynamic_output_shape_2());
+}
+
+TEST_F(GeneratedTests, add_v1_2_zero_sized) {
+    execute(add_v1_2::CreateModel_zero_sized,
+            add_v1_2::is_ignored_zero_sized,
+            add_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(GeneratedTests, add_v1_2_zero_sized_relaxed) {
+    execute(add_v1_2::CreateModel_zero_sized_relaxed,
+            add_v1_2::is_ignored_zero_sized_relaxed,
+            add_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(GeneratedTests, add_v1_2_zero_sized_quant8) {
+    execute(add_v1_2::CreateModel_zero_sized_quant8,
+            add_v1_2::is_ignored_zero_sized_quant8,
+            add_v1_2::get_examples_zero_sized_quant8());
+}
+
+TEST_F(GeneratedTests, add_v1_2_zero_sized_float16) {
+    execute(add_v1_2::CreateModel_zero_sized_float16,
+            add_v1_2::is_ignored_zero_sized_float16,
+            add_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape) {
+    execute(add_v1_2::CreateModel_zero_sized_dynamic_output_shape,
+            add_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+            add_v1_2::get_examples_zero_sized_dynamic_output_shape());
+}
+
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+    execute(add_v1_2::CreateModel_zero_sized_dynamic_output_shape_relaxed,
+            add_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+            add_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+}
+
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape_quant8) {
+    execute(add_v1_2::CreateModel_zero_sized_dynamic_output_shape_quant8,
+            add_v1_2::is_ignored_zero_sized_dynamic_output_shape_quant8,
+            add_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8());
+}
+
+TEST_F(DynamicOutputShapeTest, add_v1_2_zero_sized_dynamic_output_shape_float16) {
+    execute(add_v1_2::CreateModel_zero_sized_dynamic_output_shape_float16,
+            add_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+            add_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
+}
+
diff --git a/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp b/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp
deleted file mode 100644
index dcb5771..0000000
--- a/runtime/test/generated/tests/div_broadcast_float16.mod.py.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// clang-format off
-// Generated file (from: div_broadcast_float16.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace div_broadcast_float16 {
-// Generated div_broadcast_float16 test
-#include "generated/examples/div_broadcast_float16.example.cpp"
-// Generated model constructor
-#include "generated/models/div_broadcast_float16.model.cpp"
-} // namespace div_broadcast_float16
-
-TEST_F(GeneratedTests, div_broadcast_float16) {
-    execute(div_broadcast_float16::CreateModel,
-            div_broadcast_float16::is_ignored,
-            div_broadcast_float16::get_examples());
-}
-
-TEST_F(DynamicOutputShapeTest, div_broadcast_float16_dynamic_output_shape) {
-    execute(div_broadcast_float16::CreateModel_dynamic_output_shape,
-            div_broadcast_float16::is_ignored_dynamic_output_shape,
-            div_broadcast_float16::get_examples_dynamic_output_shape());
-}
-
diff --git a/runtime/test/generated/tests/div_float16.mod.py.cpp b/runtime/test/generated/tests/div_float16.mod.py.cpp
deleted file mode 100644
index b0b8f1f..0000000
--- a/runtime/test/generated/tests/div_float16.mod.py.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// clang-format off
-// Generated file (from: div_float16.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace div_float16 {
-// Generated div_float16 test
-#include "generated/examples/div_float16.example.cpp"
-// Generated model constructor
-#include "generated/models/div_float16.model.cpp"
-} // namespace div_float16
-
-TEST_F(GeneratedTests, div_float16) {
-    execute(div_float16::CreateModel,
-            div_float16::is_ignored,
-            div_float16::get_examples());
-}
-
-TEST_F(DynamicOutputShapeTest, div_float16_dynamic_output_shape) {
-    execute(div_float16::CreateModel_dynamic_output_shape,
-            div_float16::is_ignored_dynamic_output_shape,
-            div_float16::get_examples_dynamic_output_shape());
-}
-
diff --git a/runtime/test/generated/tests/div_v1_2.mod.py.cpp b/runtime/test/generated/tests/div_v1_2.mod.py.cpp
new file mode 100644
index 0000000..0ca5e1b
--- /dev/null
+++ b/runtime/test/generated/tests/div_v1_2.mod.py.cpp
@@ -0,0 +1,71 @@
+// clang-format off
+// Generated file (from: div_v1_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace div_v1_2 {
+// Generated div_v1_2 test
+#include "generated/examples/div_v1_2.example.cpp"
+// Generated model constructor
+#include "generated/models/div_v1_2.model.cpp"
+} // namespace div_v1_2
+
+TEST_F(GeneratedTests, div_v1_2) {
+    execute(div_v1_2::CreateModel,
+            div_v1_2::is_ignored,
+            div_v1_2::get_examples());
+}
+
+TEST_F(DynamicOutputShapeTest, div_v1_2_dynamic_output_shape) {
+    execute(div_v1_2::CreateModel_dynamic_output_shape,
+            div_v1_2::is_ignored_dynamic_output_shape,
+            div_v1_2::get_examples_dynamic_output_shape());
+}
+
+TEST_F(GeneratedTests, div_v1_2_2) {
+    execute(div_v1_2::CreateModel_2,
+            div_v1_2::is_ignored_2,
+            div_v1_2::get_examples_2());
+}
+
+TEST_F(DynamicOutputShapeTest, div_v1_2_dynamic_output_shape_2) {
+    execute(div_v1_2::CreateModel_dynamic_output_shape_2,
+            div_v1_2::is_ignored_dynamic_output_shape_2,
+            div_v1_2::get_examples_dynamic_output_shape_2());
+}
+
+TEST_F(GeneratedTests, div_v1_2_zero_sized) {
+    execute(div_v1_2::CreateModel_zero_sized,
+            div_v1_2::is_ignored_zero_sized,
+            div_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(GeneratedTests, div_v1_2_zero_sized_relaxed) {
+    execute(div_v1_2::CreateModel_zero_sized_relaxed,
+            div_v1_2::is_ignored_zero_sized_relaxed,
+            div_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(GeneratedTests, div_v1_2_zero_sized_float16) {
+    execute(div_v1_2::CreateModel_zero_sized_float16,
+            div_v1_2::is_ignored_zero_sized_float16,
+            div_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(DynamicOutputShapeTest, div_v1_2_zero_sized_dynamic_output_shape) {
+    execute(div_v1_2::CreateModel_zero_sized_dynamic_output_shape,
+            div_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+            div_v1_2::get_examples_zero_sized_dynamic_output_shape());
+}
+
+TEST_F(DynamicOutputShapeTest, div_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+    execute(div_v1_2::CreateModel_zero_sized_dynamic_output_shape_relaxed,
+            div_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+            div_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+}
+
+TEST_F(DynamicOutputShapeTest, div_v1_2_zero_sized_dynamic_output_shape_float16) {
+    execute(div_v1_2::CreateModel_zero_sized_dynamic_output_shape_float16,
+            div_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+            div_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
+}
+
diff --git a/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp b/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp
deleted file mode 100644
index 3eff0a2..0000000
--- a/runtime/test/generated/tests/mul_broadcast_float16.mod.py.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// clang-format off
-// Generated file (from: mul_broadcast_float16.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace mul_broadcast_float16 {
-// Generated mul_broadcast_float16 test
-#include "generated/examples/mul_broadcast_float16.example.cpp"
-// Generated model constructor
-#include "generated/models/mul_broadcast_float16.model.cpp"
-} // namespace mul_broadcast_float16
-
-TEST_F(GeneratedTests, mul_broadcast_float16) {
-    execute(mul_broadcast_float16::CreateModel,
-            mul_broadcast_float16::is_ignored,
-            mul_broadcast_float16::get_examples());
-}
-
-TEST_F(DynamicOutputShapeTest, mul_broadcast_float16_dynamic_output_shape) {
-    execute(mul_broadcast_float16::CreateModel_dynamic_output_shape,
-            mul_broadcast_float16::is_ignored_dynamic_output_shape,
-            mul_broadcast_float16::get_examples_dynamic_output_shape());
-}
-
diff --git a/runtime/test/generated/tests/mul_float16.mod.py.cpp b/runtime/test/generated/tests/mul_float16.mod.py.cpp
deleted file mode 100644
index bd4a890..0000000
--- a/runtime/test/generated/tests/mul_float16.mod.py.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// clang-format off
-// Generated file (from: mul_float16.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace mul_float16 {
-// Generated mul_float16 test
-#include "generated/examples/mul_float16.example.cpp"
-// Generated model constructor
-#include "generated/models/mul_float16.model.cpp"
-} // namespace mul_float16
-
-TEST_F(GeneratedTests, mul_float16) {
-    execute(mul_float16::CreateModel,
-            mul_float16::is_ignored,
-            mul_float16::get_examples());
-}
-
-TEST_F(DynamicOutputShapeTest, mul_float16_dynamic_output_shape) {
-    execute(mul_float16::CreateModel_dynamic_output_shape,
-            mul_float16::is_ignored_dynamic_output_shape,
-            mul_float16::get_examples_dynamic_output_shape());
-}
-
diff --git a/runtime/test/generated/tests/mul_v1_2.mod.py.cpp b/runtime/test/generated/tests/mul_v1_2.mod.py.cpp
new file mode 100644
index 0000000..bd93a2c
--- /dev/null
+++ b/runtime/test/generated/tests/mul_v1_2.mod.py.cpp
@@ -0,0 +1,83 @@
+// clang-format off
+// Generated file (from: mul_v1_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace mul_v1_2 {
+// Generated mul_v1_2 test
+#include "generated/examples/mul_v1_2.example.cpp"
+// Generated model constructor
+#include "generated/models/mul_v1_2.model.cpp"
+} // namespace mul_v1_2
+
+TEST_F(GeneratedTests, mul_v1_2) {
+    execute(mul_v1_2::CreateModel,
+            mul_v1_2::is_ignored,
+            mul_v1_2::get_examples());
+}
+
+TEST_F(DynamicOutputShapeTest, mul_v1_2_dynamic_output_shape) {
+    execute(mul_v1_2::CreateModel_dynamic_output_shape,
+            mul_v1_2::is_ignored_dynamic_output_shape,
+            mul_v1_2::get_examples_dynamic_output_shape());
+}
+
+TEST_F(GeneratedTests, mul_v1_2_2) {
+    execute(mul_v1_2::CreateModel_2,
+            mul_v1_2::is_ignored_2,
+            mul_v1_2::get_examples_2());
+}
+
+TEST_F(DynamicOutputShapeTest, mul_v1_2_dynamic_output_shape_2) {
+    execute(mul_v1_2::CreateModel_dynamic_output_shape_2,
+            mul_v1_2::is_ignored_dynamic_output_shape_2,
+            mul_v1_2::get_examples_dynamic_output_shape_2());
+}
+
+TEST_F(GeneratedTests, mul_v1_2_zero_sized) {
+    execute(mul_v1_2::CreateModel_zero_sized,
+            mul_v1_2::is_ignored_zero_sized,
+            mul_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(GeneratedTests, mul_v1_2_zero_sized_relaxed) {
+    execute(mul_v1_2::CreateModel_zero_sized_relaxed,
+            mul_v1_2::is_ignored_zero_sized_relaxed,
+            mul_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(GeneratedTests, mul_v1_2_zero_sized_quant8) {
+    execute(mul_v1_2::CreateModel_zero_sized_quant8,
+            mul_v1_2::is_ignored_zero_sized_quant8,
+            mul_v1_2::get_examples_zero_sized_quant8());
+}
+
+TEST_F(GeneratedTests, mul_v1_2_zero_sized_float16) {
+    execute(mul_v1_2::CreateModel_zero_sized_float16,
+            mul_v1_2::is_ignored_zero_sized_float16,
+            mul_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape) {
+    execute(mul_v1_2::CreateModel_zero_sized_dynamic_output_shape,
+            mul_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+            mul_v1_2::get_examples_zero_sized_dynamic_output_shape());
+}
+
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+    execute(mul_v1_2::CreateModel_zero_sized_dynamic_output_shape_relaxed,
+            mul_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+            mul_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+}
+
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape_quant8) {
+    execute(mul_v1_2::CreateModel_zero_sized_dynamic_output_shape_quant8,
+            mul_v1_2::is_ignored_zero_sized_dynamic_output_shape_quant8,
+            mul_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8());
+}
+
+TEST_F(DynamicOutputShapeTest, mul_v1_2_zero_sized_dynamic_output_shape_float16) {
+    execute(mul_v1_2::CreateModel_zero_sized_dynamic_output_shape_float16,
+            mul_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+            mul_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
+}
+
diff --git a/runtime/test/generated/tests/sub_v1_2.mod.py.cpp b/runtime/test/generated/tests/sub_v1_2.mod.py.cpp
index 3da0cc0..4a6e81f 100644
--- a/runtime/test/generated/tests/sub_v1_2.mod.py.cpp
+++ b/runtime/test/generated/tests/sub_v1_2.mod.py.cpp
@@ -117,3 +117,51 @@
             sub_v1_2::get_examples_quant8_dynamic_output_shape());
 }
 
+TEST_F(GeneratedTests, sub_v1_2_zero_sized) {
+    execute(sub_v1_2::CreateModel_zero_sized,
+            sub_v1_2::is_ignored_zero_sized,
+            sub_v1_2::get_examples_zero_sized());
+}
+
+TEST_F(GeneratedTests, sub_v1_2_zero_sized_relaxed) {
+    execute(sub_v1_2::CreateModel_zero_sized_relaxed,
+            sub_v1_2::is_ignored_zero_sized_relaxed,
+            sub_v1_2::get_examples_zero_sized_relaxed());
+}
+
+TEST_F(GeneratedTests, sub_v1_2_zero_sized_quant8) {
+    execute(sub_v1_2::CreateModel_zero_sized_quant8,
+            sub_v1_2::is_ignored_zero_sized_quant8,
+            sub_v1_2::get_examples_zero_sized_quant8());
+}
+
+TEST_F(GeneratedTests, sub_v1_2_zero_sized_float16) {
+    execute(sub_v1_2::CreateModel_zero_sized_float16,
+            sub_v1_2::is_ignored_zero_sized_float16,
+            sub_v1_2::get_examples_zero_sized_float16());
+}
+
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape) {
+    execute(sub_v1_2::CreateModel_zero_sized_dynamic_output_shape,
+            sub_v1_2::is_ignored_zero_sized_dynamic_output_shape,
+            sub_v1_2::get_examples_zero_sized_dynamic_output_shape());
+}
+
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape_relaxed) {
+    execute(sub_v1_2::CreateModel_zero_sized_dynamic_output_shape_relaxed,
+            sub_v1_2::is_ignored_zero_sized_dynamic_output_shape_relaxed,
+            sub_v1_2::get_examples_zero_sized_dynamic_output_shape_relaxed());
+}
+
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape_quant8) {
+    execute(sub_v1_2::CreateModel_zero_sized_dynamic_output_shape_quant8,
+            sub_v1_2::is_ignored_zero_sized_dynamic_output_shape_quant8,
+            sub_v1_2::get_examples_zero_sized_dynamic_output_shape_quant8());
+}
+
+TEST_F(DynamicOutputShapeTest, sub_v1_2_zero_sized_dynamic_output_shape_float16) {
+    execute(sub_v1_2::CreateModel_zero_sized_dynamic_output_shape_float16,
+            sub_v1_2::is_ignored_zero_sized_dynamic_output_shape_float16,
+            sub_v1_2::get_examples_zero_sized_dynamic_output_shape_float16());
+}
+
diff --git a/runtime/test/generated/vts_models/add_broadcast_float16.model.cpp b/runtime/test/generated/vts_models/add_broadcast_float16.model.cpp
deleted file mode 100644
index 2cacf63..0000000
--- a/runtime/test/generated/vts_models/add_broadcast_float16.model.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// clang-format off
-// Generated file (from: add_broadcast_float16.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {1, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ADD,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-// Create the model
-Model createTestModel_dynamic_output_shape() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {1, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {0, 0},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ADD,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/vts_models/add_float16.model.cpp b/runtime/test/generated/vts_models/add_float16.model.cpp
deleted file mode 100644
index 028c6c5..0000000
--- a/runtime/test/generated/vts_models/add_float16.model.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// clang-format off
-// Generated file (from: add_float16.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ADD,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-// Create the model
-Model createTestModel_dynamic_output_shape() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {0},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::ADD,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/vts_models/add_v1_2.model.cpp b/runtime/test/generated/vts_models/add_v1_2.model.cpp
new file mode 100644
index 0000000..bd2e7d0
--- /dev/null
+++ b/runtime/test/generated/vts_models/add_v1_2.model.cpp
@@ -0,0 +1,2232 @@
+// clang-format off
+// Generated file (from: add_v1_2.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ADD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ADD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ADD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::ADD,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+// Graph under test: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD.  The NMS step
+// emits tensors whose first dimension is zero, so the ROI_ALIGN result
+// (operand 18) and the final ADD output (operand 21) are zero-sized; this
+// exercises zero-batch support in the broadcast ADD operation.
+Model createTestModel_zero_sized() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs to BOX_WITH_NMS_LIMIT; their data
+        // lives in operandValues at each .location offset.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; all have a zero-sized
+        // first dimension.  6 and 8 are model outputs; 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input feature map.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters for ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: ADD's constant second input and activation scalar.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        // Operand 21: zero-sized model output produced by ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Inputs/outputs below are indices into the operand table above.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Packed little-endian bytes backing every CONSTANT_COPY operand, laid
+    // out at the .location offsets declared above.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Membership test against this model's ignore set; the set is empty, so no
+// index is ever ignored.
+inline bool is_ignored_zero_sized(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
+// Create the model
+// Same BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD zero-sized graph as
+// createTestModel_zero_sized, but with relaxComputationFloat32toFloat16 set,
+// allowing FP32 tensors to be computed with reduced (FP16) precision.
+Model createTestModel_zero_sized_relaxed() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs to BOX_WITH_NMS_LIMIT.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; all have a zero-sized
+        // first dimension.  6 and 8 are model outputs; 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input feature map.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters for ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: ADD's constant second input and activation scalar.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        // Operand 21: zero-sized model output produced by ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Inputs/outputs below are indices into the operand table above.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Packed little-endian bytes backing every CONSTANT_COPY operand, laid
+    // out at the .location offsets declared above.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        // Only difference from the non-relaxed variant of this model.
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+// Membership test against this model's ignore set; the set is empty, so no
+// index is ever ignored.
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
+// Create the model
+// Quantized variant of the BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD zero-sized
+// graph: feature tensors are TENSOR_QUANT8_ASYMM (scale 0.1, zeroPoint 128)
+// and the ROI tensors are TENSOR_QUANT16_ASYMM (scale 0.125).
+Model createTestModel_zero_sized_quant8() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs to BOX_WITH_NMS_LIMIT.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 2},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 2, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 18, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 22, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 30, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; all have a zero-sized
+        // first dimension.  6 and 8 are model outputs; 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input feature map.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters for ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 34, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 38, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 46, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 50, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 54, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 58, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of ADD.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: ADD's constant second input and activation scalar.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 59, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 63, .length = 4},
+        },
+        // Operand 21: zero-sized model output produced by ADD.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Inputs/outputs below are indices into the operand table above.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Packed little-endian bytes backing every CONSTANT_COPY operand, laid
+    // out at the .location offsets declared above.
+    std::vector<uint8_t> operandValues = {
+      137, 129, 8, 0, 8, 0, 80, 0, 80, 0, 0, 0, 0, 0, 80, 0, 80, 0, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 138, 148, 158, 168, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Membership test against this model's ignore set; the set is empty, so no
+// index is ever ignored.
+inline bool is_ignored_zero_sized_quant8(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
+// Create the model
+// Half-precision variant of the BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD
+// zero-sized graph: tensors are TENSOR_FLOAT16 and the scalar parameters
+// that were FLOAT32 elsewhere are FLOAT16 (2-byte) here.
+Model createTestModel_zero_sized_float16() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs to BOX_WITH_NMS_LIMIT.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; all have a zero-sized
+        // first dimension.  6 and 8 are model outputs; 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input feature map.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters for ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: ADD's constant second input and activation scalar.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        // Operand 21: zero-sized model output produced by ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Inputs/outputs below are indices into the operand table above.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Packed little-endian bytes backing every CONSTANT_COPY operand, laid
+    // out at the .location offsets declared above.
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns true if output index i should be skipped by the test harness for
+// the zero_sized_float16 variant. The ignore set is empty: every output is
+// checked.
+inline bool is_ignored_zero_sized_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+// Graph: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD (TENSOR_FLOAT32 variant).
+// The tensors between the operations carry a zero-sized first dimension,
+// exercising zero-batch support in the broadcasting ADD, and the model
+// outputs use dynamic (all-zero) output shapes.
+Model createTestModel_zero_sized_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs of BOX_WITH_NMS_LIMIT.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs, all with a zero-sized
+        // first dimension; 6 and 8 are model outputs, 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input; first input of ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters of ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        // Operand 18: ROI_ALIGN output (zero-sized batch); first input of ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: remaining ADD inputs — a constant tensor whose
+        // shape {1, 2, 2, 1} broadcasts against {0, 2, 2, 2}, and an INT32
+        // scalar.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        // Operand 21: ADD output; model output with fully dynamic shape.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Raw little-endian bytes backing every CONSTANT_COPY operand above;
+    // each operand's .location offset/length indexes into this buffer.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns true if output index i should be skipped by the test harness for
+// the zero_sized_dynamic_output_shape variant. The ignore set is empty:
+// every output is checked.
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+// Same graph as createTestModel_zero_sized_dynamic_output_shape
+// (BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD with zero-sized tensors and
+// dynamic output shapes), but with relaxComputationFloat32toFloat16 set,
+// permitting FP16 arithmetic for the FLOAT32 operands.
+Model createTestModel_zero_sized_dynamic_output_shape_relaxed() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs of BOX_WITH_NMS_LIMIT.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs, all with a zero-sized
+        // first dimension; 6 and 8 are model outputs, 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input; first input of ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters of ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        // Operand 18: ROI_ALIGN output (zero-sized batch); first input of ADD.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: remaining ADD inputs — a constant tensor whose
+        // shape {1, 2, 2, 1} broadcasts against {0, 2, 2, 2}, and an INT32
+        // scalar.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        // Operand 21: ADD output; model output with fully dynamic shape.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Raw little-endian bytes backing every CONSTANT_COPY operand above;
+    // each operand's .location offset/length indexes into this buffer.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        // Only difference from the non-relaxed variant above.
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+// Returns true if output index i should be skipped by the test harness for
+// the zero_sized_dynamic_output_shape_relaxed variant. The ignore set is
+// empty: every output is checked.
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+// Quantized variant of the zero-sized dynamic-output-shape graph
+// (BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> ADD): the float tensors become
+// TENSOR_QUANT8_ASYMM (scale 0.1, zeroPoint 128) and TENSOR_QUANT16_ASYMM
+// (scale 0.125), with operandValues offsets adjusted for the smaller types.
+Model createTestModel_zero_sized_dynamic_output_shape_quant8() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs of BOX_WITH_NMS_LIMIT.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 2},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 2, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 18, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 22, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 30, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs, all with a zero-sized
+        // first dimension; 6 and 8 are model outputs, 7 and 9 feed ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the model input; first input of ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: constant scalar parameters of ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 34, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 38, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 46, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 50, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 54, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 58, .length = 1},
+        },
+        // Operand 18: ROI_ALIGN output (zero-sized batch); first input of ADD.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: remaining ADD inputs — a constant tensor whose
+        // shape {1, 2, 2, 1} broadcasts against {0, 2, 2, 2}, and an INT32
+        // scalar.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 59, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 63, .length = 4},
+        },
+        // Operand 21: ADD output; model output with fully dynamic shape.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Raw little-endian bytes backing every CONSTANT_COPY operand above;
+    // each operand's .location offset/length indexes into this buffer.
+    std::vector<uint8_t> operandValues = {
+      137, 129, 8, 0, 8, 0, 80, 0, 80, 0, 0, 0, 0, 0, 80, 0, 80, 0, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 138, 148, 158, 168, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns true if output index i should be skipped by the test harness for
+// the zero_sized_dynamic_output_shape_quant8 variant. The ignore set is
+// empty: every output is checked.
+inline bool is_ignored_zero_sized_dynamic_output_shape_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized_dynamic_output_shape_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::ADD,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/vts_models/div_broadcast_float16.model.cpp b/runtime/test/generated/vts_models/div_broadcast_float16.model.cpp
deleted file mode 100644
index 8db6f45..0000000
--- a/runtime/test/generated/vts_models/div_broadcast_float16.model.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// clang-format off
-// Generated file (from: div_broadcast_float16.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {1, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::DIV,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-// Create the model
-Model createTestModel_dynamic_output_shape() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {1, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {0, 0},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::DIV,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/vts_models/div_float16.model.cpp b/runtime/test/generated/vts_models/div_float16.model.cpp
deleted file mode 100644
index 7e5981c..0000000
--- a/runtime/test/generated/vts_models/div_float16.model.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// clang-format off
-// Generated file (from: div_float16.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::DIV,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-// Create the model
-Model createTestModel_dynamic_output_shape() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {0},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::DIV,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/vts_models/div_v1_2.model.cpp b/runtime/test/generated/vts_models/div_v1_2.model.cpp
new file mode 100644
index 0000000..88da37e
--- /dev/null
+++ b/runtime/test/generated/vts_models/div_v1_2.model.cpp
@@ -0,0 +1,1746 @@
+// clang-format off
+// Generated file (from: div_v1_2.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DIV,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DIV,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DIV,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::DIV,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::DIV,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> DIV, where the intermediate
+// tensors are declared with a zero-sized (batch 0) leading dimension, FP32 with
+// relaxed (FP16-precision) computation.
+Model createTestModel_zero_sized_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // outputs 6..9 are declared with zero-sized dimensions
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // consumes zero-sized rois (7) and batch indices (9); output 18 is zero-sized
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::DIV,  // broadcast op under test: input 18 has a zero-sized batch dimension
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // allow FP32 ops to be computed at FP16 precision
+    };
+}
+
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static const std::set<int> ignore = {};  // generated: no example indices are ignored for this model
+  return ignore.count(i) > 0;
+}
+
+// Create the model: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> DIV, where the intermediate
+// tensors are declared with a zero-sized (batch 0) leading dimension, FP16 variant.
+Model createTestModel_zero_sized_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // outputs 6..9 are declared with zero-sized dimensions
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // consumes zero-sized rois (7) and batch indices (9); output 18 is zero-sized
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::DIV,  // broadcast op under test: input 18 has a zero-sized batch dimension
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_float16(int i) {
+  static const std::set<int> ignore = {};  // generated: no example indices are ignored for this model
+  return ignore.count(i) > 0;
+}
+
+// Create the model: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> DIV with zero-sized (batch 0)
+// intermediate tensors; the final DIV output shape is left dynamic ({0, 0, 0, 0}).
+Model createTestModel_zero_sized_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // outputs 6..9 are declared with zero-sized dimensions
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // consumes zero-sized rois (7) and batch indices (9); output 18 is zero-sized
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::DIV,  // broadcast op under test; output 21 has a fully dynamic shape
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static const std::set<int> ignore = {};  // generated: no example indices are ignored for this model
+  return ignore.count(i) > 0;
+}
+
+// Create the model: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> DIV with zero-sized (batch 0)
+// intermediate tensors; dynamic DIV output shape ({0, 0, 0, 0}) and relaxed
+// (FP16-precision) FP32 computation.
+Model createTestModel_zero_sized_dynamic_output_shape_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // outputs 6..9 are declared with zero-sized dimensions
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // consumes zero-sized rois (7) and batch indices (9); output 18 is zero-sized
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::DIV,  // broadcast op under test; output 21 has a fully dynamic shape
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // allow FP32 ops to be computed at FP16 precision
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized_dynamic_output_shape_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::DIV,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/vts_models/mul_broadcast_float16.model.cpp b/runtime/test/generated/vts_models/mul_broadcast_float16.model.cpp
deleted file mode 100644
index e44664e..0000000
--- a/runtime/test/generated/vts_models/mul_broadcast_float16.model.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// clang-format off
-// Generated file (from: mul_broadcast_float16.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {1, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::MUL,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-// Create the model
-Model createTestModel_dynamic_output_shape() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {1, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {2, 2},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {0, 0},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::MUL,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/vts_models/mul_float16.model.cpp b/runtime/test/generated/vts_models/mul_float16.model.cpp
deleted file mode 100644
index ba757ab..0000000
--- a/runtime/test/generated/vts_models/mul_float16.model.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// clang-format off
-// Generated file (from: mul_float16.mod.py). Do not edit
-// Create the model
-Model createTestModel() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::MUL,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
-// Create the model
-Model createTestModel_dynamic_output_shape() {
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {3},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = 4},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT16,
-            .dimensions = {0},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        }
-    };
-
-    const std::vector<Operation> operations = {
-        {
-            .type = OperationType::MUL,
-            .inputs = {0, 1, 2},
-            .outputs = {3},
-        }
-    };
-
-    const std::vector<uint32_t> inputIndexes = {0, 1};
-    const std::vector<uint32_t> outputIndexes = {3};
-    std::vector<uint8_t> operandValues = {
-      0, 0, 0, 0
-    };
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
-}
-
-inline bool is_ignored_dynamic_output_shape(int i) {
-  static std::set<int> ignore = {};
-  return ignore.find(i) != ignore.end();
-}
-
diff --git a/runtime/test/generated/vts_models/mul_v1_2.model.cpp b/runtime/test/generated/vts_models/mul_v1_2.model.cpp
new file mode 100644
index 0000000..9ed3ace
--- /dev/null
+++ b/runtime/test/generated/vts_models/mul_v1_2.model.cpp
@@ -0,0 +1,2232 @@
+// clang-format off
+// Generated file (from: mul_v1_2.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::MUL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::MUL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::MUL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::MUL,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_dynamic_output_shape_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns true when example index i should be skipped for this model.
+// The generated skip-set is empty, so every example is checked.
+inline bool is_ignored_zero_sized(int i) {
+  static const std::set<int> skippedExamples;
+  return skippedExamples.count(i) != 0;
+}
+
+// Create the model
+// Builds the relaxed-precision (FP32 allowed to compute as FP16) variant of
+// the zero-sized model.  Graph: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL, wired
+// so the NMS outputs have a leading dimension of 0; the downstream broadcast
+// MUL therefore runs on zero-sized tensors.
+Model createTestModel_zero_sized_relaxed() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs of BOX_WITH_NMS_LIMIT (operation 0).
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; the leading dimension 0
+        // makes them zero-sized.  6 and 8 are model outputs; 7 and 9 feed
+        // ROI_ALIGN (operation 1).
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the only model input; first input of ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: scalar parameters of ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of MUL.
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: remaining MUL inputs (constant tensor and INT32
+        // scalar).
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        // Operand 21: zero-sized MUL output (model output).
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Pipeline: NMS produces zero-sized boxes, ROI_ALIGN consumes them, MUL
+    // broadcasts operand 19 against the zero-sized operand 18.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Little-endian bytes backing all CONSTANT_COPY operands above.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        // Only difference from the base model: allow FP16 computation.
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+// Returns true when example index i should be skipped for this model.
+// The generated skip-set is empty, so every example is checked.
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static const std::set<int> skippedExamples;
+  return skippedExamples.count(i) != 0;
+}
+
+// Create the model
+// Builds the quantized (QUANT8_ASYMM, scale 0.1, zeroPoint 128) variant of
+// the zero-sized model.  Graph: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL, wired
+// so the NMS outputs have a leading dimension of 0; the downstream broadcast
+// MUL therefore runs on zero-sized tensors.
+Model createTestModel_zero_sized_quant8() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs of BOX_WITH_NMS_LIMIT (operation 0).
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 2},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 2, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 18, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 22, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 30, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; the leading dimension 0
+        // makes them zero-sized.  6 and 8 are model outputs; 7 and 9 feed
+        // ROI_ALIGN (operation 1).
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the only model input; first input of ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: scalar parameters of ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 34, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 38, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 46, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 50, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 54, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 58, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of MUL.
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: remaining MUL inputs (constant tensor and INT32
+        // scalar).
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 59, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 63, .length = 4},
+        },
+        // Operand 21: zero-sized MUL output (model output).
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Pipeline: NMS produces zero-sized boxes, ROI_ALIGN consumes them, MUL
+    // broadcasts operand 19 against the zero-sized operand 18.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Little-endian bytes backing all CONSTANT_COPY operands above.
+    std::vector<uint8_t> operandValues = {
+      137, 129, 8, 0, 8, 0, 80, 0, 80, 0, 0, 0, 0, 0, 80, 0, 80, 0, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 138, 148, 158, 168, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns true when example index i should be skipped for this model.
+// The generated skip-set is empty, so every example is checked.
+inline bool is_ignored_zero_sized_quant8(int i) {
+  static const std::set<int> skippedExamples;
+  return skippedExamples.count(i) != 0;
+}
+
+// Create the model
+// Builds the FP16 (TENSOR_FLOAT16/FLOAT16) variant of the zero-sized model.
+// Graph: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL, wired so the NMS outputs
+// have a leading dimension of 0; the downstream broadcast MUL therefore runs
+// on zero-sized tensors.
+Model createTestModel_zero_sized_float16() {
+    const std::vector<Operand> operands = {
+        // Operands 0-5: constant inputs of BOX_WITH_NMS_LIMIT (operation 0).
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        // Operands 6-9: BOX_WITH_NMS_LIMIT outputs; the leading dimension 0
+        // makes them zero-sized.  6 and 8 are model outputs; 7 and 9 feed
+        // ROI_ALIGN (operation 1).
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operand 10: the only model input; first input of ROI_ALIGN.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 11-17: scalar parameters of ROI_ALIGN.
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        // Operand 18: zero-sized ROI_ALIGN output, first input of MUL.
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        // Operands 19-20: remaining MUL inputs (constant tensor and INT32
+        // scalar).
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        // Operand 21: zero-sized MUL output (model output).
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Pipeline: NMS produces zero-sized boxes, ROI_ALIGN consumes them, MUL
+    // broadcasts operand 19 against the zero-sized operand 18.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Little-endian bytes backing all CONSTANT_COPY operands above.
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns true when example index i should be skipped for this model.
+// The generated skip-set is empty, so every example is checked.
+inline bool is_ignored_zero_sized_float16(int i) {
+  static const std::set<int> skippedExamples;
+  return skippedExamples.count(i) != 0;
+}
+
+// Create the model
+Model createTestModel_zero_sized_dynamic_output_shape() {
+    // Zero-sized tensor test graph: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL.
+    // The model outputs (operands 6, 8, 21) and the intermediates feeding
+    // ROI_ALIGN/MUL (operands 7, 9, 18) all carry a 0 dimension, so the MUL
+    // operation must handle a zero batch. In this "dynamic output shape"
+    // variant the final output (operand 21) is declared {0, 0, 0, 0},
+    // forcing the driver to deduce its shape at execution time.
+    // NOTE(review): presumably the constant NMS inputs are chosen so that no
+    // boxes survive -- confirm against the generating test specification.
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Three operations chained through temporaries: BOX_WITH_NMS_LIMIT
+    // produces operands 7 and 9, which feed ROI_ALIGN; ROI_ALIGN's
+    // zero-batch output 18 is multiplied by constant tensor 19 in MUL.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Backing store for every CONSTANT_COPY operand; each operand's
+    // location {offset, length} indexes into this byte array.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns whether example index i should be skipped for this variant.
+// The ignore set is empty: no examples are skipped.
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
+// Create the model
+Model createTestModel_zero_sized_dynamic_output_shape_relaxed() {
+    // Relaxed-precision twin of the zero-sized dynamic-output-shape model:
+    // identical BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL graph and data, but
+    // .relaxComputationFloat32toFloat16 = true is set on the returned Model,
+    // permitting drivers to compute TENSOR_FLOAT32 operands in fp16.
+    // Operands 6, 8 and 21 (the model outputs) all have zero dimensions, so
+    // MUL must support a zero batch with a driver-deduced output shape.
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Three operations chained through temporaries: BOX_WITH_NMS_LIMIT
+    // produces operands 7 and 9, which feed ROI_ALIGN; ROI_ALIGN's
+    // zero-batch output 18 is multiplied by constant tensor 19 in MUL.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Backing store for every CONSTANT_COPY operand; each operand's
+    // location {offset, length} indexes into this byte array.
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+// Returns whether example index i should be skipped for this variant.
+// The ignore set is empty: no examples are skipped.
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
+// Create the model
+Model createTestModel_zero_sized_dynamic_output_shape_quant8() {
+    // Quantized variant of the zero-sized dynamic-output-shape model:
+    // same BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL graph, with the float
+    // tensors replaced by TENSOR_QUANT8_ASYMM (scale 0.1, zeroPoint 128)
+    // and the ROI coordinates by TENSOR_QUANT16_ASYMM (scale 0.125).
+    // Model outputs (operands 6, 8, 21) all have zero dimensions, so MUL
+    // must handle a zero batch and the driver must deduce output shapes.
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 2},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 2, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 18, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 22, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 30, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 34, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 38, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 46, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 50, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 54, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 58, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 59, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 63, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Three operations chained through temporaries: BOX_WITH_NMS_LIMIT
+    // produces operands 7 and 9, which feed ROI_ALIGN; ROI_ALIGN's
+    // zero-batch output 18 is multiplied by constant tensor 19 in MUL.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Backing store for every CONSTANT_COPY operand; each operand's
+    // location {offset, length} indexes into this byte array.
+    std::vector<uint8_t> operandValues = {
+      137, 129, 8, 0, 8, 0, 80, 0, 80, 0, 0, 0, 0, 0, 80, 0, 80, 0, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 138, 148, 158, 168, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns whether example index i should be skipped for this variant.
+// The ignore set is empty: no examples are skipped.
+inline bool is_ignored_zero_sized_dynamic_output_shape_quant8(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
+// Create the model
+Model createTestModel_zero_sized_dynamic_output_shape_float16() {
+    // Float16 variant of the zero-sized dynamic-output-shape model:
+    // same BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> MUL graph with the float
+    // tensors declared as TENSOR_FLOAT16 and the scalar thresholds as
+    // FLOAT16 (hence the halved constant lengths/offsets versus the
+    // float32 variant). Model outputs (operands 6, 8, 21) all have zero
+    // dimensions, so MUL must handle a zero batch and the driver must
+    // deduce the output shapes at execution time.
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    // Three operations chained through temporaries: BOX_WITH_NMS_LIMIT
+    // produces operands 7 and 9, which feed ROI_ALIGN; ROI_ALIGN's
+    // zero-batch output 18 is multiplied by constant tensor 19 in MUL.
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::MUL,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    // Backing store for every CONSTANT_COPY operand; each operand's
+    // location {offset, length} indexes into this byte array.
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+// Returns whether example index i should be skipped for this variant.
+// The ignore set is empty: no examples are skipped.
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static const std::set<int> ignore = {};
+  return ignore.count(i) != 0;
+}
+
diff --git a/runtime/test/generated/vts_models/sub_v1_2.model.cpp b/runtime/test/generated/vts_models/sub_v1_2.model.cpp
index 4be4866..5e2f5bb 100644
--- a/runtime/test/generated/vts_models/sub_v1_2.model.cpp
+++ b/runtime/test/generated/vts_models/sub_v1_2.model.cpp
@@ -1278,3 +1278,1949 @@
   return ignore.find(i) != ignore.end();
 }
 
+// Create the model
+Model createTestModel_zero_sized() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_zero_sized_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 2},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 2, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 18, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 22, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 30, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 34, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 38, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 46, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 50, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 54, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 58, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 59, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 63, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      137, 129, 8, 0, 8, 0, 80, 0, 80, 0, 0, 0, 0, 0, 80, 0, 80, 0, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 138, 148, 158, 168, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_zero_sized_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_float16(int i) {  // true when output index i should be skipped by the result checker
+  static std::set<int> ignore = {};  // generated set; empty here, so no outputs are ignored for this variant
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the zero_sized_dynamic_output_shape model (float32; output dims left unspecified).
+Model createTestModel_zero_sized_dynamic_output_shape() {  // float32 graph whose intermediate/output tensors are zero-sized
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},  // zero-sized model output (operand 6)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},  // zero rows of 4-element ROIs; consumed by ROI_ALIGN below
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},  // zero-sized model output (operand 8)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},  // sole model input (operand 10)
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},  // zero-batch intermediate (operand 18): first input of SUB
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},  // constant second input of SUB; shape differs, so it is broadcast
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},  // final output; dims all 0 (dynamic output shape variant)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // produces the zero-sized operands 6-9
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},  // operands 7 and 9 feed ROI_ALIGN below
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // output 18 is the zero-batch tensor {0, 2, 2, 2}
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,  // broadcast op under test with a zero-sized first input
+            .inputs = {18, 19, 20},  // operand 20 is an INT32 scalar — presumably the fused activation code; verify against SUB spec
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};  // all three model outputs are zero-sized operands
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {  // true when output index i should be skipped by the result checker
+  static std::set<int> ignore = {};  // generated set; empty here, so no outputs are ignored for this variant
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the zero_sized_dynamic_output_shape_relaxed model (float32 computed at relaxed float16 precision).
+Model createTestModel_zero_sized_dynamic_output_shape_relaxed() {  // same graph as the float32 variant, but with relaxed precision enabled
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 32},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0},  // zero-sized model output (operand 6)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 4},  // zero rows of 4-element ROIs; consumed by ROI_ALIGN below
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},  // zero-sized model output (operand 8)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 1, 2},  // sole model input (operand 10)
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 56, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 60, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 64, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 68, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 72, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 76, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 80, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 2, 2, 2},  // zero-batch intermediate (operand 18): first input of SUB
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 1},  // constant second input of SUB; shape differs, so it is broadcast
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 81, .length = 16},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 97, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {0, 0, 0, 0},  // final output; dims all 0 (dynamic output shape variant)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // produces the zero-sized operands 6-9
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},  // operands 7 and 9 feed ROI_ALIGN below
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // output 18 is the zero-batch tensor {0, 2, 2, 2}
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,  // broadcast op under test with a zero-sized first input
+            .inputs = {18, 19, 20},  // operand 20 is an INT32 scalar — presumably the fused activation code; verify against SUB spec
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};  // all three model outputs are zero-sized operands
+    std::vector<uint8_t> operandValues = {
+      102, 102, 102, 63, 205, 204, 204, 61, 0, 0, 128, 63, 0, 0, 128, 63, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 32, 65, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,  // only difference from the non-relaxed variant
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {  // true when output index i should be skipped by the result checker
+  static std::set<int> ignore = {};  // generated set; empty here, so no outputs are ignored for this variant
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the zero_sized_dynamic_output_shape_quant8 model (asymmetric quantized variant).
+Model createTestModel_zero_sized_dynamic_output_shape_quant8() {  // quantized (QUANT8_ASYMM / QUANT16_ASYMM) version of the zero-sized graph
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 2},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 2, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 18, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 22, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 30, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0},  // zero-sized model output (operand 6)
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT16_ASYMM,
+            .dimensions = {0, 4},  // zero rows of 4-element ROIs; consumed by ROI_ALIGN below
+            .numberOfConsumers = 1,
+            .scale = 0.125f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},  // zero-sized model output (operand 8)
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 1, 2},  // sole model input (operand 10)
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 34, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 38, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 46, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 50, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 54, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 58, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 2, 2, 2},  // zero-batch intermediate (operand 18): first input of SUB
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 1},  // constant second input of SUB; shape differs, so it is broadcast
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 59, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 63, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {0, 0, 0, 0},  // final output; dims all 0 (dynamic output shape variant)
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,  // produces the zero-sized operands 6-9
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},  // operands 7 and 9 feed ROI_ALIGN below
+        },
+        {
+            .type = OperationType::ROI_ALIGN,  // output 18 is the zero-batch tensor {0, 2, 2, 2}
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,  // broadcast op under test with a zero-sized first input
+            .inputs = {18, 19, 20},  // operand 20 is an INT32 scalar — presumably the fused activation code; verify against SUB spec
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};  // all three model outputs are zero-sized operands
+    std::vector<uint8_t> operandValues = {
+      137, 129, 8, 0, 8, 0, 80, 0, 80, 0, 0, 0, 0, 0, 80, 0, 80, 0, 0, 0, 0, 0, 154, 153, 153, 62, 205, 204, 204, 62, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 138, 148, 158, 168, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_quant8(int i) {  // true when output index i should be skipped by the result checker
+  static std::set<int> ignore = {};  // generated set; empty here, so no outputs are ignored for this variant
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the zero_sized_dynamic_output_shape_float16 model (half-precision variant).
+Model createTestModel_zero_sized_dynamic_output_shape_float16() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 8},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 4, .length = 16},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 20, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 24, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 26, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 28, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {0},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 32, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 36, .length = 4},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 40, .length = 2},
+        },
+        {
+            .type = OperandType::FLOAT16,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 42, .length = 2},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 44, .length = 4},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 48, .length = 4},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 52, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 2, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {1, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 53, .length = 8},
+        },
+        {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 61, .length = 4},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {0, 0, 0, 0},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BOX_WITH_NMS_LIMIT,
+            .inputs = {0, 1, 2, 3, 4, 5},
+            .outputs = {6, 7, 8, 9},
+        },
+        {
+            .type = OperationType::ROI_ALIGN,
+            .inputs = {10, 7, 9, 11, 12, 13, 14, 15, 16, 17},
+            .outputs = {18},
+        },
+        {
+            .type = OperationType::SUB,
+            .inputs = {18, 19, 20},
+            .outputs = {21},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {10};
+    const std::vector<uint32_t> outputIndexes = {6, 8, 21};
+    std::vector<uint8_t> operandValues = {
+      51, 59, 102, 46, 0, 60, 0, 60, 0, 73, 0, 73, 0, 0, 0, 0, 0, 73, 0, 73, 0, 0, 0, 0, 205, 52, 102, 54, 255, 255, 255, 255, 2, 0, 0, 0, 2, 0, 0, 0, 0, 64, 0, 64, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 60, 0, 64, 0, 66, 0, 68, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/specs/V1_2/add_broadcast_float16.mod.py b/runtime/test/specs/V1_2/add_broadcast_float16.mod.py
deleted file mode 100644
index e70224f..0000000
--- a/runtime/test/specs/V1_2/add_broadcast_float16.mod.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2}")
-i2 = Input("op2", "TENSOR_FLOAT16", "{2, 2}")
-act = Int32Scalar("act", 0)
-i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
-model = model.Operation("ADD", i1, i2, act).To(i3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [1, 2],
-          i2: # input 1
-          [1, 2, 3, 4]}
-
-output0 = {i3: # output 0
-           [2, 4, 4, 6]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtime/test/specs/V1_2/add_float16.mod.py b/runtime/test/specs/V1_2/add_float16.mod.py
deleted file mode 100644
index d512c02..0000000
--- a/runtime/test/specs/V1_2/add_float16.mod.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
-i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
-act = Int32Scalar("act", 0) # an int32_t scalar activation
-i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
-model = model.Operation("ADD", i1, i2, act).To(i3)
-model = model.RelaxedExecution(False)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [1.0009765625, 1.0, 2.5],
-          i2: # input 1
-          [2E-23, 0.0001, 3.5]}
-
-output0 = {i3: # output 0
-           [1.0009765625, 1.0, 6.0]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtime/test/specs/V1_2/add_v1_2.mod.py b/runtime/test/specs/V1_2/add_v1_2.mod.py
new file mode 100644
index 0000000..b42d637
--- /dev/null
+++ b/runtime/test/specs/V1_2/add_v1_2.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: ADD float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
+i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
+act = Int32Scalar("act", 0) # an int32_t scalar activation
+i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
+model = model.Operation("ADD", i1, i2, act).To(i3)
+model = model.RelaxedExecution(False)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0009765625, 1.0, 2.5],
+          i2: # input 1
+          [2E-23, 0.0001, 3.5]}
+
+output0 = {i3: # output 0
+           [1.0009765625, 1.0, 6.0]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2: ADD broadcast float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT16", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
+model = model.Operation("ADD", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2],
+          i2: # input 1
+          [1, 2, 3, 4]}
+
+output0 = {i3: # output 0
+           [2, 4, 4, 6]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: ADD, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, 0.4, -1).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# ADD op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("ADD", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+    p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+    i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+    i1: [1, 2],
+    o1: [0],
+    o2: [0],
+    o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/runtime/test/specs/V1_2/div_broadcast_float16.mod.py b/runtime/test/specs/V1_2/div_broadcast_float16.mod.py
deleted file mode 100644
index 5a4b3b7..0000000
--- a/runtime/test/specs/V1_2/div_broadcast_float16.mod.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT16", "{2, 2}")
-i2 = Input("op2", "TENSOR_FLOAT16", "{1, 2}")
-act = Int32Scalar("act", 0)
-i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
-model = model.Operation("DIV", i1, i2, act).To(i3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [1, 4, 3, 8],
-          i2: # input 1
-          [1, 2]}
-
-output0 = {i3: # output 0
-          [1, 2, 3, 4]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtime/test/specs/V1_2/div_float16.mod.py b/runtime/test/specs/V1_2/div_float16.mod.py
deleted file mode 100644
index dc94b3b..0000000
--- a/runtime/test/specs/V1_2/div_float16.mod.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
-i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
-act = Int32Scalar("act", 0) # an int32_t scalar activation
-i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
-model = model.Operation("DIV", i1, i2, act).To(i3)
-model = model.RelaxedExecution(False)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [2.001953125, 0.0001000165, 8.75],
-          i2: # input 1
-          [2, 0.0001, 3.5]}
-
-output0 = {i3: # output 0
-          [1.0009765625, 1.0, 2.5]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtime/test/specs/V1_2/div_v1_2.mod.py b/runtime/test/specs/V1_2/div_v1_2.mod.py
new file mode 100644
index 0000000..524cd08
--- /dev/null
+++ b/runtime/test/specs/V1_2/div_v1_2.mod.py
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: DIV float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
+i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
+act = Int32Scalar("act", 0) # an int32_t scalar activation
+i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+model = model.RelaxedExecution(False)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [2.001953125, 0.0001000165, 8.75],
+          i2: # input 1
+          [2, 0.0001, 3.5]}
+
+output0 = {i3: # output 0
+          [1.0009765625, 1.0, 2.5]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2: DIV broadcast float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT16", "{1, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 4, 3, 8],
+          i2: # input 1
+          [1, 2]}
+
+output0 = {i3: # output 0
+          [1, 2, 3, 4]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: DIV, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, 0.4, -1).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# DIV op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("DIV", zero_sized, i2, 0).To(o3)
+
+# Create test case with dummy values.
+Example({
+    i1: [1, 2],
+    o1: [0],
+    o2: [0],
+    o3: [0],
+}).AddVariations("relaxed", "float16")
diff --git a/runtime/test/specs/V1_2/mul_broadcast_float16.mod.py b/runtime/test/specs/V1_2/mul_broadcast_float16.mod.py
deleted file mode 100644
index c5ac040..0000000
--- a/runtime/test/specs/V1_2/mul_broadcast_float16.mod.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2}")
-i2 = Input("op2", "TENSOR_FLOAT16", "{2, 2}")
-act = Int32Scalar("act", 0)
-i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
-model = model.Operation("MUL", i1, i2, act).To(i3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [1, 2],
-          i2: # input 1
-          [1, 2, 3, 4]}
-
-output0 = {i3: # output 0
-           [1, 4, 3, 8]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtime/test/specs/V1_2/mul_float16.mod.py b/runtime/test/specs/V1_2/mul_float16.mod.py
deleted file mode 100644
index ff886c7..0000000
--- a/runtime/test/specs/V1_2/mul_float16.mod.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
-i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
-act = Int32Scalar("act", 0) # an int32_t scalar activation
-i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
-model = model.Operation("MUL", i1, i2, act).To(i3)
-model = model.RelaxedExecution(False)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [1.0009765625, 1.0, 2.5],
-          i2: # input 1
-          [2, 0.0001, 3.5]}
-
-output0 = {i3: # output 0
-           [2.001953125, 0.0001000165, 8.75]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtime/test/specs/V1_2/mul_v1_2.mod.py b/runtime/test/specs/V1_2/mul_v1_2.mod.py
new file mode 100644
index 0000000..69f51dc
--- /dev/null
+++ b/runtime/test/specs/V1_2/mul_v1_2.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: MUL float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
+i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
+act = Int32Scalar("act", 0) # an int32_t scalar activation
+i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
+model = model.Operation("MUL", i1, i2, act).To(i3)
+model = model.RelaxedExecution(False)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0009765625, 1.0, 2.5],
+          i2: # input 1
+          [2, 0.0001, 3.5]}
+
+output0 = {i3: # output 0
+           [2.001953125, 0.0001000165, 8.75]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2: MUL broadcast float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT16", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
+model = model.Operation("MUL", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2],
+          i2: # input 1
+          [1, 2, 3, 4]}
+
+output0 = {i3: # output 0
+           [1, 4, 3, 8]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: MUL, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, 0.4, -1).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MUL op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("MUL", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+    p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+    i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+    i1: [1, 2],
+    o1: [0],
+    o2: [0],
+    o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/runtime/test/specs/V1_2/sub_v1_2.mod.py b/runtime/test/specs/V1_2/sub_v1_2.mod.py
index 3de18bf..ab6fc19 100644
--- a/runtime/test/specs/V1_2/sub_v1_2.mod.py
+++ b/runtime/test/specs/V1_2/sub_v1_2.mod.py
@@ -52,3 +52,45 @@
     input1: input1_values,
     output0: output_values,
 })
+
+
+# SUB, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, 0.4, -1).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# SUB op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("SUB", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+    p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+    i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+    o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+    i1: [1, 2],
+    o1: [0],
+    o2: [0],
+    o3: [0],
+}).AddVariations("relaxed", quant8, "float16")