Add TENSOR_QUANT8_ASYMM_SIGNED support to ADD, MUL and SUB.

* Update legacy TF Lite function calls to the current ones.
* Add a QuantizeMultiplierSmallerThanOneExp function (same as the TF
Lite function of the same name). It does the same thing as
QuantizeMultiplierSmallerThanOne but returns a left shift instead of
a right shift, so that all the QuantizeMultiplier* functions share
the same interface (see the sketch below).
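
Illustrative sketch only, not part of this change (bool return values
are ignored for brevity):

    int32_t multiplier, left_shift, right_shift;
    QuantizeMultiplierSmallerThanOneExp(0.5, &multiplier, &left_shift);
    QuantizeMultiplierSmallerThanOne(0.5, &multiplier, &right_shift);
    // Both calls yield the same quantized multiplier; the returned
    // shifts differ only in sign: left_shift == -right_shift.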

Bug: 143934463
Bug: 143934627
Bug: 143935040
Test: NNTest_static and 1.3 VTS QuantizationCouplingTest
Change-Id: I897c07bc5d93dfa7c03a0c66eeb77fcbdec6aea0
Merged-In: I897c07bc5d93dfa7c03a0c66eeb77fcbdec6aea0
(cherry picked from commit c7b32bf9555a556cd1b9e5ffe9372a7f6d3fadb8)
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index 5c251a2..89ed5dc 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -27,6 +27,9 @@
 #include "Operations.h"
 #include "Utils.h"
 
+#include "Operations.h"
+#include "Utils.h"
+
 namespace android {
 namespace nn {
 
@@ -47,6 +50,35 @@
     return true;
 }
 
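+// Computes the [*act_min, *act_max] clamping bounds implied by a fused
+// activation for a quantized output with the given scale and zero point;
+// qmin/qmax are the numeric limits of the output data type.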
+void CalculateActivationRangeImpl(int32_t activation, const Shape& outputShape, int32_t qmin,
+                                  int32_t qmax, int32_t* act_min, int32_t* act_max) {
+    const auto scale = outputShape.scale;
+    const auto zero_point = outputShape.offset;
+
+    auto quantize = [scale, zero_point](float f) {
+        return zero_point + static_cast<int32_t>(std::round(f / scale));
+    };
+
+    if (activation == kActivationRelu) {
+        *act_min = std::max(qmin, quantize(0.0));
+        *act_max = qmax;
+    } else if (activation == kActivationRelu6) {
+        *act_min = std::max(qmin, quantize(0.0));
+        *act_max = std::min(qmax, quantize(6.0));
+    } else if (activation == kActivationRelu1) {
+        *act_min = std::max(qmin, quantize(-1.0));
+        *act_max = std::min(qmax, quantize(1.0));
+    } else if (activation == kActivationNone) {
+        *act_min = qmin;
+        *act_max = qmax;
+    } else {
+        LOG(ERROR) << "Unsupported fused activation function.";
+    }
+}
+
 }  // namespace
 
 bool validateInputTypes(const IOperationValidationContext* context,
@@ -171,7 +203,7 @@
     return true;
 }
 
-bool QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int* shift) {
+bool QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int32_t* shift) {
     if (double_multiplier == 0.) {
         *quantized_multiplier = 0;
         *shift = 0;
@@ -189,6 +221,17 @@
     return true;
 }
 
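+// Like QuantizeMultiplierSmallerThanOne below, but returns the shift as a
+// non-positive left shift, matching the convention of QuantizeMultiplier.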
+bool QuantizeMultiplierSmallerThanOneExp(double double_multiplier, int32_t* quantized_multiplier,
+                                         int32_t* left_shift) {
+    NN_RET_CHECK(double_multiplier > 0.);
+    NN_RET_CHECK(double_multiplier < 1.);
+    NN_RET_CHECK(QuantizeMultiplier(double_multiplier, quantized_multiplier, left_shift));
+    NN_RET_CHECK(*left_shift <= 0);
+    return true;
+}
+
 bool QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t* quantized_multiplier,
                                       int32_t* right_shift) {
     NN_OPS_CHECK(double_multiplier >= 0.);
@@ -249,28 +292,17 @@
     const int32_t qmin = std::numeric_limits<uint8_t>::min();
     const int32_t qmax = std::numeric_limits<uint8_t>::max();
 
-    const auto scale = outputShape.scale;
-    const auto zero_point = outputShape.offset;
+    CalculateActivationRangeImpl(activation, outputShape, qmin, qmax, act_min, act_max);
+}
 
-    auto quantize = [scale, zero_point](float f) {
-        return zero_point + static_cast<int32_t>(std::round(f / scale));
-    };
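+// Signed variant of the helper above: clamps to the int8_t range used by
+// TENSOR_QUANT8_ASYMM_SIGNED outputs.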
+void CalculateActivationRangeInt8(int32_t activation, const Shape& outputShape, int32_t* act_min,
+                                  int32_t* act_max) {
+    const int32_t qmin = std::numeric_limits<int8_t>::min();
+    const int32_t qmax = std::numeric_limits<int8_t>::max();
 
-    if (activation == kActivationRelu) {
-        *act_min = std::max(qmin, quantize(0.0));
-        *act_max = qmax;
-    } else if (activation == kActivationRelu6) {
-        *act_min = std::max(qmin, quantize(0.0));
-        *act_max = std::min(qmax, quantize(6.0));
-    } else if (activation == kActivationRelu1) {
-        *act_min = std::max(qmin, quantize(-1.0));
-        *act_max = std::min(qmax, quantize(1.0));
-    } else if (activation == kActivationNone) {
-        *act_min = qmin;
-        *act_max = qmax;
-    } else {
-        LOG(ERROR) << "Unsupported fused activation function.";
-    }
+    CalculateActivationRangeImpl(activation, outputShape, qmin, qmax, act_min, act_max);
 }
 
 void CalculateActivationRangeFloat(int32_t activation, float* activation_min,