Initial implementation of the following quantized ops.
- CONV_QUANT8
- DEPTHWISE_CONV_QUANT8
- AVERAGE_POOL_QUANT8
- MAX_POOL_QUANT8
- LOGISTIC_QUANT8
Additionally, added functions to plumb through quantization
parameters.
Bug: 63905942
Test: mm
Test: end-to-end MobileNet quantized test passes
Change-Id: Ib2753c68bf2c51467ae1c158b45541bcfdf10789
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 57e29aa..c6f2fae 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -40,8 +40,8 @@
"ANEURALNETWORKS_TENSOR_FLOAT16 may have changed");
static_assert(ANEURALNETWORKS_TENSOR_FLOAT32 == 9,
"ANEURALNETWORKS_TENSOR_FLOAT32 may have changed");
-static_assert(ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8 == 10,
- "ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8 may have changed");
+static_assert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM == 10,
+ "ANEURALNETWORKS_TENSOR_QUANT8_ASYMM may have changed");
// Ensure that the constants are compatible with the values defined in the hal files.
static_assert(static_cast<uint32_t>(OperandType::FLOAT16) == ANEURALNETWORKS_FLOAT16,
@@ -64,9 +64,9 @@
"TENSOR_FLOAT16 != ANEURALNETWORKS_TENSOR_FLOAT16");
static_assert(static_cast<uint32_t>(OperandType::TENSOR_FLOAT32) == ANEURALNETWORKS_TENSOR_FLOAT32,
"TENSOR_FLOAT32 != ANEURALNETWORKS_TENSOR_FLOAT32");
-static_assert(static_cast<uint32_t>(OperandType::TENSOR_SYMMETRICAL_QUANT8) ==
- ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8,
- "TENSOR_SYMMETRICAL_QUANT8 != ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8");
+static_assert(static_cast<uint32_t>(OperandType::TENSOR_QUANT8_ASYMM) ==
+ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+ "TENSOR_QUANT8_ASYMM != ANEURALNETWORKS_TENSOR_QUANT8_ASYMM");
using namespace android::nn;