Generalize CONV_2D op to support input product scale >= output scale

This removes an inconsistency with TF Lite.
TF Lite removed input/output scale restrictions to support additional use
cases. This is a superset of existing CONV_2D functionality.

Bug: 127427474
Test: NeuralNetworksTest_static
Change-Id: I7d9f7afa0ae177c9169af012b2ba915a1341a698
Merged-In: I7d9f7afa0ae177c9169af012b2ba915a1341a698
(cherry picked from commit 5ad151ad06d6544ea6407532e03a72b29735b56d)
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index 4b257eb..938aed0 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -150,6 +150,24 @@
     return true;
 }
 
+bool QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int* shift) {
+    if (double_multiplier == 0.) {
+        *quantized_multiplier = 0;
+        *shift = 0;
+        return true;
+    }
+    const double q = std::frexp(double_multiplier, shift);
+    auto q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
+    NN_RET_CHECK(q_fixed <= (1ll << 31));
+    if (q_fixed == (1ll << 31)) {
+        q_fixed /= 2;
+        ++*shift;
+    }
+    NN_RET_CHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
+    *quantized_multiplier = static_cast<int32_t>(q_fixed);
+    return true;
+}
+
 bool QuantizeMultiplierSmallerThanOne(double double_multiplier,
                                       int32_t* quantized_multiplier,
                                       int32_t* right_shift) {
@@ -205,7 +223,6 @@
     NN_OPS_CHECK(std::abs(input_product_scale - bias_scale) <=
               1e-6 * std::min(input_product_scale, bias_scale));
     NN_OPS_CHECK(input_product_scale >= 0);
-    NN_OPS_CHECK(input_product_scale < output_scale);
     *multiplier = input_product_scale / output_scale;
     return true;
 }