Support zero batch in broadcast ops.
Also switch to OperationResolver.
Bug: 126737477
Test: NeuralNetworksTest_static
Change-Id: Ia2aaf7db4539ce5ffb97eb2341b0b5a56b2b8483
Merged-In: Ia2aaf7db4539ce5ffb97eb2341b0b5a56b2b8483
(cherry picked from commit 041d28acbe75b80b5d55db5daea7303751f1c4fa)
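The key behavioral change in calculateBroadcastedShape: the old std::max(dim1, dim2) rule turns a zero-sized batch into the other operand's batch size, while the new rule only lets a dimension of exactly 1 broadcast, so zero-sized dimensions survive into the output shape. A minimal standalone sketch of that rule (plain C++ with std::vector shapes instead of the real Shape struct; not part of this change):

#include <cassert>
#include <cstdint>
#include <vector>

// Only a dimension of exactly 1 broadcasts; anything else, including 0, wins.
// (The real function also rejects dimension pairs that are neither equal nor 1.)
uint32_t broadcastDim(uint32_t dim1, uint32_t dim2) {
    return (dim1 == 1) ? dim2 : dim1;
}

int main() {
    // Broadcast a zero-batch [0, 4] tensor against a [1, 4] tensor.
    std::vector<uint32_t> a = {0, 4};
    std::vector<uint32_t> b = {1, 4};
    std::vector<uint32_t> out(a.size());
    for (size_t i = 0; i < out.size(); ++i) {
        out[i] = broadcastDim(a[i], b[i]);
    }
    // The old std::max rule would have produced {1, 4}; the new rule
    // keeps the empty batch, giving {0, 4}.
    assert(out[0] == 0 && out[1] == 4);
    return 0;
}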
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index af4e2e7..25a5258 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -289,6 +289,7 @@
}

bool calculateBroadcastedShape(const Shape& in1, const Shape& in2, Shape* out) {
+ NN_RET_CHECK(in1.type == in2.type);
uint32_t numberOfDims1 = getNumberOfDimensions(in1);
uint32_t numberOfDims2 = getNumberOfDimensions(in2);
uint32_t maxDims = std::max(numberOfDims1, numberOfDims2);
@@ -308,7 +309,7 @@
<< "\nSecond tensor: dimension " << numberOfDims2 - i << "of size " << dim2;
return false;
}
- out->dimensions[maxDims - i] = std::max(dim1, dim2);
+ out->dimensions[maxDims - i] = (dim1 == 1) ? dim2 : dim1;
}
return true;
}
@@ -318,15 +319,6 @@
return static_cast<uint8_t>(doubleValue / newShape.scale + newShape.offset);
}

-bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out) {
- NN_OPS_CHECK(getNumberOfDimensions(in1) <= 4 && getNumberOfDimensions(in2) <= 4);
- NN_OPS_CHECK(in1.type == in2.type);
- if (SameShape(in1, in2)) {
- return SetShape(in1, out);
- }
- return calculateBroadcastedShape(in1, in2, out);
-}
-
bool floorPrepare(const Shape& input, Shape* output) {
return SetShape(input, output);
}
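
On the OperationResolver side (not shown in this file), ADD/MUL and the other broadcast ops are expected to go through per-operation registrations instead of the removed addMulPrepare helper. A rough sketch of what such a registration looks like, assuming the NN_REGISTER_OPERATION macro and the allowZeroSizedInput registration flag from OperationResolver.h; the broadcast:: function names are placeholders, not necessarily the ones used in the real Broadcast.cpp:

// Illustrative registration only; assumes the NNAPI common headers.
// allowZeroSizedInput lets empty (zero-batch) tensors reach prepare/execute
// instead of being rejected up front.
NN_REGISTER_OPERATION(ADD, "ADD", broadcast::validate, broadcast::prepare,
                      broadcast::executeAdd, .allowZeroSizedInput = true);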