Move all op preparation functions to OperationsUtils.
- All op prepare functions are moved to OperationsUtils.
- Add a helper function to derive the implicit padding scheme from
explicit paddings.
- Make all prepare functions return false on error.
Bug: 63905942
Test: NeuralNetworkTests
Change-Id: I16538dbd731a5ca1e6de5e0d0b269e9f386f4d29
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index b2ce63a..dc5ddec 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -162,5 +162,320 @@
return static_cast<int32_t>(std::floor(max_input_rescaled));
}
+
+// Macro to check whether the input parameters of an operation are valid.
+#define nnOpsCheck(v) \
+ if (!(v)) { \
+        LOG(ERROR) << "nnOpsCheck failed: " << #v << "\n"; \
+ return false; \
+ }
+
+bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out) {
+ nnOpsCheck(getNumberOfDimensions(in1) <= 4 && getNumberOfDimensions(in2) <= 4);
+ if (SameShape(in1, in2)) {
+ return SetShape(in1, out);
+ } else {
+ // BroadcastAdd needed
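+        // Dimensions are matched from the trailing (rightmost) dimension
+        // backwards; a missing dimension or a dimension of size 1 is
+        // broadcast against the other operand, e.g. [2, 3] and [4, 1, 3]
+        // produce an output of [4, 2, 3].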
+ uint32_t numberOfDims1 = getNumberOfDimensions(in1);
+ uint32_t numberOfDims2 = getNumberOfDimensions(in2);
+ uint32_t maxDims = std::max(numberOfDims1, numberOfDims2);
+ out->dimensions = std::vector<uint32_t>(maxDims);
+ for (uint32_t i = 1; i <= maxDims; i++) {
+ uint32_t dim1 = 1;
+ if (i <= numberOfDims1) {
+ dim1 = getSizeOfDimension(in1, numberOfDims1 - i);
+ }
+ uint32_t dim2 = 1;
+ if (i <= numberOfDims2) {
+ dim2 = getSizeOfDimension(in2, numberOfDims2 - i);
+ }
+ if (dim1 != dim2 && dim1 != 1 && dim2 != 1) {
+ LOG(ERROR) << "Dimensions mismatch for BroadcastAdd";
+ return false;
+ }
+ out->dimensions[maxDims - i] = std::max(dim1, dim2);
+ }
+ }
+ return true;
+}
+
+bool floorPrepare(const Shape& input, Shape* output) {
+ return SetShape(input, output);
+}
+
+bool dequantizePrepare(const Shape& input, Shape* output) {
+ if (input.type != OperandType::TENSOR_QUANT8_ASYMM ||
+ output->type != OperandType::TENSOR_FLOAT32) {
+ LOG(ERROR) << "bad input / output operand type.";
+ return false;
+ }
+ return SetShape(input, output);
+}
+
+bool convPrepare(const Shape& input,
+ const Shape& filter,
+ const Shape& bias,
+ int32_t padding_left, int32_t padding_right,
+ int32_t padding_top, int32_t padding_bottom,
+ int32_t stride_width, int32_t stride_height,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ nnOpsCheck(getNumberOfDimensions(filter) == 4);
+ nnOpsCheck(getNumberOfDimensions(bias) == 1);
+
+ nnOpsCheck(getSizeOfDimension(filter, 0) == getSizeOfDimension(bias, 0));
+ nnOpsCheck(getSizeOfDimension(filter, 3) == getSizeOfDimension(input, 3));
+
+ nnOpsCheck(stride_width == stride_height);
+
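+    // The filter is laid out as [depth_out, filter_height, filter_width, depth_in],
+    // matching the checks above.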
+ uint32_t channels_out = getSizeOfDimension(filter, 0);
+ uint32_t width = getSizeOfDimension(input, 2);
+ uint32_t height = getSizeOfDimension(input, 1);
+ uint32_t filterWidth = getSizeOfDimension(filter, 2);
+ uint32_t filterHeight = getSizeOfDimension(filter, 1);
+ uint32_t batches = getSizeOfDimension(input, 0);
+
+ uint32_t outWidth = computeOutSize(width, filterWidth, stride_width,
+ padding_left, padding_right);
+ uint32_t outHeight = computeOutSize(height, filterHeight, stride_height,
+ padding_top, padding_bottom);
+
+ output->type = input.type;
+ output->dimensions = {batches, outHeight, outWidth, channels_out};
+ return true;
+}
+
+bool depthwiseConvPrepare(const Shape& input,
+ const Shape& filter,
+ const Shape& bias,
+ int32_t padding_left, int32_t padding_right,
+ int32_t padding_top, int32_t padding_bottom,
+ int32_t stride_width, int32_t stride_height,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ nnOpsCheck(getNumberOfDimensions(filter) == 4);
+ nnOpsCheck(getNumberOfDimensions(bias) == 1);
+
+ nnOpsCheck(getSizeOfDimension(filter, 3) == getSizeOfDimension(bias, 0));
+
+ nnOpsCheck(stride_width == stride_height);
+
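+    // The depthwise filter is laid out as [1, filter_height, filter_width, depth_out].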
+ uint32_t channels_out = getSizeOfDimension(filter, 3);
+ uint32_t width = getSizeOfDimension(input, 2);
+ uint32_t height = getSizeOfDimension(input, 1);
+ uint32_t filterWidth = getSizeOfDimension(filter, 2);
+ uint32_t filterHeight = getSizeOfDimension(filter, 1);
+ uint32_t batches = getSizeOfDimension(input, 0);
+
+ uint32_t outWidth = computeOutSize(width, filterWidth, stride_width,
+ padding_left, padding_right);
+ uint32_t outHeight = computeOutSize(height, filterHeight, stride_height,
+ padding_top, padding_bottom);
+
+ output->type = input.type;
+ output->dimensions = {batches, outHeight, outWidth, channels_out};
+ return true;
+}
+
+bool genericPoolingPrepare(const Shape& input,
+ int32_t padding_left, int32_t padding_right,
+ int32_t padding_top, int32_t padding_bottom,
+ int32_t stride_width, int32_t stride_height,
+ int32_t filter_width, int32_t filter_height,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ nnOpsCheck(stride_width == stride_height);
+
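+    // Pooling leaves the batch and channel dimensions untouched; only the
+    // spatial dimensions shrink with the filter and stride.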
+ uint32_t batches = getSizeOfDimension(input, 0);
+ uint32_t width = getSizeOfDimension(input, 2);
+ uint32_t height = getSizeOfDimension(input, 1);
+ uint32_t channels_out = getSizeOfDimension(input, 3);
+
+ uint32_t outWidth = computeOutSize(width, filter_width, stride_width,
+ padding_left, padding_right);
+ uint32_t outHeight = computeOutSize(height, filter_height, stride_height,
+ padding_top, padding_bottom);
+
+ output->type = input.type;
+ output->dimensions = {batches, outHeight, outWidth, channels_out};
+ return true;
+}
+
+bool genericActivationPrepare(const Shape& input,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ return SetShape(input, output);
+}
+
+bool fullyConnectedPrepare(const Shape& input,
+ const Shape& weights,
+ const Shape& bias,
+ Shape* output) {
+ // Check all the parameters of tensor match within themselves and match the
+ // input configuration.
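+    // The input is interpreted as [batch_size, input_size], where input_size
+    // must equal the second dimension of the 2-D weights tensor
+    // [num_units, input_size].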
+ uint32_t input_size = getNumberOfElements(input);
+ uint32_t num_units = getSizeOfDimension(weights, 0);
+ uint32_t batch_size = input_size / getSizeOfDimension(weights, 1);
+
+ nnOpsCheck(getSizeOfDimension(bias, 0) == num_units);
+ nnOpsCheck(getSizeOfDimension(weights, 1) * batch_size == input_size);
+ nnOpsCheck(getNumberOfDimensions(weights) == 2);
+
+ output->type = input.type;
+ output->dimensions = {batch_size, num_units};
+
+ return true;
+}
+
+bool concatenationPrepare(const std::vector<Shape>& inputShapes,
+ int32_t axis,
+ Shape* output) {
+
+ int num_inputs = inputShapes.size();
+ OperandType input_type = inputShapes[0].type;
+ uint32_t num_dimensions = getNumberOfDimensions(inputShapes[0]);
+
+ nnOpsCheck(axis >= 0);
+ nnOpsCheck(axis < (int32_t)num_dimensions);
+
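+    // Inputs must agree on every dimension except the concatenation axis;
+    // the output size along that axis is the sum of the input sizes.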
+ int sum_axis = getSizeOfDimension(inputShapes[0], axis);
+ for (int i = 1; i < num_inputs; ++i) {
+ nnOpsCheck(getNumberOfDimensions(inputShapes[i]) == num_dimensions);
+ nnOpsCheck(inputShapes[i].type == inputShapes[0].type);
+ if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
+ nnOpsCheck(inputShapes[0].offset == inputShapes[i].offset);
+ nnOpsCheck(inputShapes[0].scale == inputShapes[i].scale);
+ }
+ for (int d = 0; d < (int32_t)num_dimensions; ++d) {
+ if (d == axis) {
+ sum_axis += getSizeOfDimension(inputShapes[i], axis);
+ } else {
+ nnOpsCheck(getSizeOfDimension(inputShapes[0], d) ==
+ getSizeOfDimension(inputShapes[i], d));
+ }
+ }
+ }
+
+ output->type = input_type;
+ output->dimensions = inputShapes[0].dimensions;
+ output->dimensions[axis] = sum_axis;
+
+ if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
+ nnOpsCheck(inputShapes[0].offset == output->offset);
+ nnOpsCheck(inputShapes[0].scale == output->scale);
+ }
+
+ return true;
+}
+
+bool genericNormalizationPrepare(const Shape& input, Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ return SetShape(input, output);
+}
+
+bool reshapePrepare(const Shape& input,
+ const int32_t* targetDims,
+ const int32_t targetDimsSize,
+ Shape* output) {
+ // Reshape allows one of the targetDims components to have the
+ // special -1 value, meaning it will be calculated automatically based on the
+ // input. Here we calculate what that dimension should be so that the number
+    // of output elements is the same as the number of input elements.
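+    // For example, reshaping a [2, 3, 4] input with targetDims = {4, -1}
+    // infers the -1 dimension as 24 / 4 = 6, giving an output of [4, 6].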
+ int32_t numInputElements = (int32_t) getNumberOfElements(input);
+
+ std::vector<uint32_t> outDims(targetDimsSize);
+ int32_t numOutputElements = 1;
+    int32_t stretchDim = -1;
+ for (int32_t i = 0; i < targetDimsSize; ++i) {
+ int32_t value = targetDims[i];
+ if (value == -1) {
+            nnOpsCheck(stretchDim == -1);
+            stretchDim = i;
+ } else {
+ numOutputElements *= value;
+ outDims[i] = (uint32_t)value;
+ }
+ }
+    if (stretchDim != -1) {
+        int32_t stretchValue = numInputElements / numOutputElements;
+        outDims[stretchDim] = (uint32_t) stretchValue;
+        numOutputElements *= stretchValue;
+ }
+
+ nnOpsCheck(numInputElements == numOutputElements);
+
+ output->type = input.type;
+ output->dimensions = outDims;
+ output->offset = input.offset;
+ output->scale = input.scale;
+
+ return true;
+}
+
+bool resizeBilinearPrepare(const Shape& input,
+ int32_t width,
+ int32_t height,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
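+    // Only the spatial dimensions change; the batch and channel sizes are
+    // carried over from the input.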
+ uint32_t batches = getSizeOfDimension(input, 0);
+ uint32_t channels = getSizeOfDimension(input, 3);
+
+ output->type = input.type;
+ output->dimensions = {batches, (uint32_t)height, (uint32_t)width, channels};
+
+ return true;
+}
+
+bool depthToSpacePrepare(const Shape& input,
+ int32_t blockSize,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ nnOpsCheck(blockSize > 0);
+
+ uint32_t batches = getSizeOfDimension(input, 0);
+ uint32_t height = getSizeOfDimension(input, 1);
+ uint32_t width = getSizeOfDimension(input, 2);
+ uint32_t channels = getSizeOfDimension(input, 3);
+
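+    // Each group of blockSize * blockSize channels is rearranged into a
+    // blockSize x blockSize spatial block, so the channel count must be
+    // divisible by blockSize^2.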
+ nnOpsCheck(channels % (blockSize * blockSize) == 0);
+ output->type = input.type;
+ output->dimensions = {batches,
+ height * blockSize,
+ width * blockSize,
+ channels / (blockSize * blockSize)};
+ output->offset = input.offset;
+ output->scale = input.scale;
+
+ return true;
+}
+
+bool spaceToDepthPrepare(const Shape& input,
+ int32_t blockSize,
+ Shape* output) {
+ nnOpsCheck(getNumberOfDimensions(input) == 4);
+ nnOpsCheck(blockSize > 0);
+
+ uint32_t batches = getSizeOfDimension(input, 0);
+ uint32_t height = getSizeOfDimension(input, 1);
+ uint32_t width = getSizeOfDimension(input, 2);
+ uint32_t channels = getSizeOfDimension(input, 3);
+
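+    // Each blockSize x blockSize spatial block is folded into the channel
+    // dimension, so both spatial dimensions must be divisible by blockSize.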
+ nnOpsCheck(height % blockSize == 0);
+ nnOpsCheck(width % blockSize == 0);
+
+ output->type = input.type;
+ output->dimensions = {batches,
+ height / blockSize,
+ width / blockSize,
+ channels * (blockSize * blockSize)};
+ output->offset = input.offset;
+ output->scale = input.scale;
+
+ return true;
+}
+
} // namespace nn
} // namespace android
diff --git a/common/include/Operations.h b/common/include/Operations.h
index 10b6b31..3702719 100644
--- a/common/include/Operations.h
+++ b/common/include/Operations.h
@@ -34,13 +34,6 @@
struct Shape;
-enum PaddingScheme {
- kPaddingUnknown = 0,
- kPaddingSame = 1,
- kPaddingValid = 2,
-};
-
-bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out1);
bool addFloat32(const float* in1, const Shape& shape1,
const float* in2, const Shape& shape2,
int32_t activation,
@@ -50,23 +43,14 @@
int32_t activation,
float* out, const Shape& shapeOut);
-bool floorPrepare(const Shape& input, Shape* output);
bool floorFloat32(const float* inputData,
float* outputData,
const Shape& shape);
-bool dequantizePrepare(const Shape& input, Shape* output);
bool dequantizeQuant8ToFloat32(const uint8_t* inputData,
float* outputData,
const Shape& shape);
-bool depthwiseConvPrepare(const Shape& input,
- const Shape& filter,
- const Shape& bias,
- int32_t padding_left, int32_t padding_right,
- int32_t padding_top, int32_t padding_bottom,
- int32_t stride_width, int32_t stride_height,
- Shape* output);
bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape,
const float* filterData, const Shape& filterShape,
const float* biasData, const Shape& biasShape,
@@ -84,13 +68,6 @@
int32_t depth_multiplier, int32_t activation,
uint8_t* outputData, const Shape& outputShape);
-bool convPrepare(const Shape& input,
- const Shape& filter,
- const Shape& bias,
- int32_t padding_left, int32_t padding_right,
- int32_t padding_top, int32_t padding_bottom,
- int32_t stride_width, int32_t stride_height,
- Shape* output);
bool convFloat32(const float* inputData, const Shape& inputShape,
const float* filterData, const Shape& filterShape,
const float* biasData, const Shape& biasShape,
@@ -108,12 +85,6 @@
int32_t activation,
uint8_t* outputData, const Shape& outputShape);
-bool genericPoolingPrepare(const Shape& input,
- int32_t padding_left, int32_t padding_right,
- int32_t padding_top, int32_t padding_bottom,
- int32_t stride_width, int32_t stride_height,
- int32_t filter_width, int32_t filter_height,
- Shape* output);
bool averagePoolFloat32(const float* inputData, const Shape& inputShape,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom,
@@ -145,7 +116,6 @@
int32_t filter_width, int32_t filter_height, int32_t activation,
uint8_t* outputData, const Shape& outputShape);
-bool genericActivationPrepare(const Shape& input, Shape* output);
bool reluFloat32(const float* inputData, const Shape& inputShape,
float* outputData, const Shape& outputShape);
bool relu1Float32(const float* inputData, const Shape& inputShape,
@@ -171,10 +141,6 @@
const float beta,
uint8_t* outputData, const Shape& outputShape);
-bool fullyConnectedPrepare(const Shape& input,
- const Shape& weights,
- const Shape& bias,
- Shape* output);
bool fullyConnectedFloat32(const float* inputData, const Shape& inputShape,
const float* weights, const Shape& weightsShape,
const float* biasData, const Shape& biasShape,
@@ -186,9 +152,6 @@
int32_t activation,
uint8_t* outputData, const Shape& outputShape);
-bool concatenationPrepare(const std::vector<Shape>& inputShapes,
- int32_t axis,
- Shape* output);
bool concatenationFloat32(const std::vector<const float*>& inputDataPtrs,
const std::vector<Shape>& inputShapes,
int32_t axis, int32_t activation,
@@ -198,7 +161,6 @@
int32_t axis, int32_t activation,
uint8_t* outputData, const Shape& outputShape);
-bool genericNormalizationPrepare(const Shape& input, Shape* output);
bool l2normFloat32(const float* inputData, const Shape& inputShape,
float* outputData, const Shape& outputShape);
bool l2normQuant8(const uint8_t* inputData, const Shape& inputShape,
@@ -207,32 +169,18 @@
int32_t radius, float bias, float alpha, float beta,
float* outputData, const Shape& outputShape);
-bool reshapePrepare(const Shape& input,
- const int32_t* targetDims,
- const int32_t targetDimsSize,
- Shape* output);
bool reshapeGeneric(const void* inputData, const Shape& inputShape,
void* outputData, const Shape& outputShape);
-bool resizeBilinearPrepare(const Shape& input,
- int32_t height,
- int32_t width,
- Shape* output);
bool resizeBilinearFloat32(const float* inputData,
const Shape& inputShape,
float* outputData,
const Shape& outputShape);
-bool depthToSpacePrepare(const Shape& input,
- int32_t blockSize,
- Shape* output);
bool depthToSpaceGeneric(const uint8_t* inputData, const Shape& inputShape,
int32_t blockSize,
uint8_t* outputData, const Shape& outputShape);
-bool spaceToDepthPrepare(const Shape& input,
- int32_t blockSize,
- Shape* output);
bool spaceToDepthGeneric(const uint8_t* inputData, const Shape& inputShape,
int32_t blockSize,
uint8_t* outputData, const Shape& outputShape);
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h
index 60516db..34d3a8a 100644
--- a/common/include/OperationsUtils.h
+++ b/common/include/OperationsUtils.h
@@ -25,6 +25,32 @@
namespace android {
namespace nn {
+enum PaddingScheme {
+ kPaddingUnknown = 0,
+ kPaddingSame = 1,
+ kPaddingValid = 2,
+};
+
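+// Derives the implicit padding scheme (SAME/VALID) that corresponds to the
+// given explicit padding values. For a 3x3 filter, for example, paddings of
+// {1, 1, 1, 1} yield kPaddingSame and {0, 0, 0, 0} yield kPaddingValid; any
+// other combination is reported as kPaddingUnknown. With SAME padding any
+// extra padding goes on the right/bottom, so a left/top value larger than
+// its right/bottom counterpart cannot arise.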
+inline PaddingScheme getPaddingScheme(uint32_t filterWidth, uint32_t filterHeight,
+ uint32_t paddingLeft, uint32_t paddingRight,
+ uint32_t paddingTop, uint32_t paddingBottom) {
+ if (paddingLeft > paddingRight || paddingTop > paddingBottom) {
+ return kPaddingUnknown;
+ }
+
+    uint32_t totalPaddingWidth = paddingLeft + paddingRight;
+    uint32_t totalPaddingHeight = paddingTop + paddingBottom;
+    if (totalPaddingWidth == filterWidth - 1 &&
+        totalPaddingHeight == filterHeight - 1) {
+        return kPaddingSame;
+    } else if (totalPaddingWidth == 0 &&
+               totalPaddingHeight == 0) {
+        return kPaddingValid;
+ } else {
+ return kPaddingUnknown;
+ }
+}
+
// The type and dimensions of an operand.
struct Shape {
OperandType type;
@@ -73,6 +99,67 @@
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
+// Preparation functions for the corresponding ops
+bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out1);
+
+bool floorPrepare(const Shape& input, Shape* output);
+
+bool dequantizePrepare(const Shape& input, Shape* output);
+
+bool depthwiseConvPrepare(const Shape& input,
+ const Shape& filter,
+ const Shape& bias,
+ int32_t padding_left, int32_t padding_right,
+ int32_t padding_top, int32_t padding_bottom,
+ int32_t stride_width, int32_t stride_height,
+ Shape* output);
+
+bool convPrepare(const Shape& input,
+ const Shape& filter,
+ const Shape& bias,
+ int32_t padding_left, int32_t padding_right,
+ int32_t padding_top, int32_t padding_bottom,
+ int32_t stride_width, int32_t stride_height,
+ Shape* output);
+
+bool genericPoolingPrepare(const Shape& input,
+ int32_t padding_left, int32_t padding_right,
+ int32_t padding_top, int32_t padding_bottom,
+ int32_t stride_width, int32_t stride_height,
+ int32_t filter_width, int32_t filter_height,
+ Shape* output);
+
+bool genericActivationPrepare(const Shape& input, Shape* output);
+
+bool fullyConnectedPrepare(const Shape& input,
+ const Shape& weights,
+ const Shape& bias,
+ Shape* output);
+
+bool concatenationPrepare(const std::vector<Shape>& inputShapes,
+ int32_t axis,
+ Shape* output);
+
+bool genericNormalizationPrepare(const Shape& input, Shape* output);
+
+bool reshapePrepare(const Shape& input,
+ const int32_t* targetDims,
+ const int32_t targetDimsSize,
+ Shape* output);
+
+bool resizeBilinearPrepare(const Shape& input,
+                           int32_t width,
+                           int32_t height,
+ Shape* output);
+
+bool depthToSpacePrepare(const Shape& input,
+ int32_t blockSize,
+ Shape* output);
+
+bool spaceToDepthPrepare(const Shape& input,
+ int32_t blockSize,
+ Shape* output);
+
#define ANDROID_NN_MACRO_DISPATCH(macro) \
switch (activation) { \
case (int32_t) FusedActivationFunc::NONE: \
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index 66ab661..c649c22 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -22,12 +22,6 @@
namespace android {
namespace nn {
-bool genericActivationPrepare(const Shape& input,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- return SetShape(input, output);
-}
-
bool reluFloat32(const float* inputData, const Shape& inputShape,
float* outputData, const Shape& outputShape) {
int numElements = getNumberOfElements(inputShape);
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp
index c44e5ec..005c816 100644
--- a/common/operations/Concatenation.cpp
+++ b/common/operations/Concatenation.cpp
@@ -22,47 +22,6 @@
namespace android {
namespace nn {
-bool concatenationPrepare(const std::vector<Shape>& inputShapes,
- int32_t axis,
- Shape* output) {
-
- int num_inputs = inputShapes.size();
- OperandType input_type = inputShapes[0].type;
- uint32_t num_dimensions = getNumberOfDimensions(inputShapes[0]);
-
- DCHECK_GE(axis, 0);
- DCHECK_LT(axis, (int32_t)num_dimensions);
-
- int sum_axis = getSizeOfDimension(inputShapes[0], axis);
- for (int i = 1; i < num_inputs; ++i) {
- DCHECK_EQ(getNumberOfDimensions(inputShapes[i]), num_dimensions);
- DCHECK_EQ((uint32_t)inputShapes[i].type, (uint32_t)inputShapes[0].type);
- if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
- DCHECK_EQ(inputShapes[0].offset, inputShapes[i].offset);
- DCHECK_EQ(inputShapes[0].scale, inputShapes[i].scale);
- }
- for (int d = 0; d < (int32_t)num_dimensions; ++d) {
- if (d == axis) {
- sum_axis += getSizeOfDimension(inputShapes[i], axis);
- } else {
- DCHECK_EQ(getSizeOfDimension(inputShapes[0], d),
- getSizeOfDimension(inputShapes[i], d));
- }
- }
- }
-
- output->type = input_type;
- output->dimensions = inputShapes[0].dimensions;
- output->dimensions[axis] = sum_axis;
-
- if (input_type == OperandType::TENSOR_QUANT8_ASYMM) {
- DCHECK_EQ(inputShapes[0].offset, output->offset);
- DCHECK_EQ(inputShapes[0].scale, output->scale);
- }
-
- return true;
-}
-
bool concatenationFloat32(const std::vector<const float*>& inputDataPtrs,
const std::vector<Shape>& inputShapes,
int32_t axis, int32_t activation,
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index fbb20a0..cbdc4f8 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -26,37 +26,6 @@
static constexpr int kStaticBufferSize = 1605632;
static char static_scratch_buffer[kStaticBufferSize];
-bool convPrepare(const Shape& input,
- const Shape& filter,
- const Shape& bias,
- int32_t padding_left, int32_t padding_right,
- int32_t padding_top, int32_t padding_bottom,
- int32_t stride_width, int32_t stride_height,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- DCHECK_EQ(getNumberOfDimensions(filter), 4);
- DCHECK_EQ(getNumberOfDimensions(bias), 1);
-
- DCHECK_EQ(getSizeOfDimension(filter, 3), getSizeOfDimension(bias, 0));
- DCHECK_EQ(stride_width, stride_height);
-
- uint32_t channels_out = getSizeOfDimension(filter, 0);
- uint32_t width = getSizeOfDimension(input, 2);
- uint32_t height = getSizeOfDimension(input, 1);
- uint32_t filterWidth = getSizeOfDimension(filter, 2);
- uint32_t filterHeight = getSizeOfDimension(filter, 1);
- uint32_t batches = getSizeOfDimension(input, 0);
-
- uint32_t outWidth = computeOutSize(width, filterWidth, stride_width,
- padding_left, padding_right);
- uint32_t outHeight = computeOutSize(height, filterHeight, stride_height,
- padding_top, padding_bottom);
-
- output->type = input.type;
- output->dimensions = {batches, outHeight, outWidth, channels_out};
- return true;
-}
-
#define ANDROID_NN_CONV_PARAMETERS(Type) \
uint32_t height = getSizeOfDimension(inputShape, 1); \
uint32_t width = getSizeOfDimension(inputShape, 2); \
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 854e1ed..9f43d56 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -23,38 +23,6 @@
namespace android {
namespace nn {
-bool depthwiseConvPrepare(const Shape& input,
- const Shape& filter,
- const Shape& bias,
- int32_t padding_left, int32_t padding_right,
- int32_t padding_top, int32_t padding_bottom,
- int32_t stride_width, int32_t stride_height,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- DCHECK_EQ(getNumberOfDimensions(filter), 4);
- DCHECK_EQ(getNumberOfDimensions(bias), 1);
-
- DCHECK_EQ(getSizeOfDimension(filter, 3), getSizeOfDimension(bias, 0));
- DCHECK_EQ(stride_width, stride_height);
-
- uint32_t channels_out = getSizeOfDimension(filter, 3);
- uint32_t width = getSizeOfDimension(input, 2);
- uint32_t height = getSizeOfDimension(input, 1);
- uint32_t filterWidth = getSizeOfDimension(filter, 2);
- uint32_t filterHeight = getSizeOfDimension(filter, 1);
- uint32_t batches = getSizeOfDimension(input, 0);
-
- uint32_t outWidth = computeOutSize(width, filterWidth, stride_width,
- padding_left, padding_right);
- uint32_t outHeight = computeOutSize(height, filterHeight, stride_height,
- padding_top, padding_bottom);
-
- output->type = input.type;
- output->dimensions = {batches, outHeight, outWidth, channels_out};
- return true;
-}
-
-
#define ANDROID_NN_DEPTHWISE_CONV_PARAMETERS \
uint32_t height = getSizeOfDimension(inputShape, 1); \
uint32_t width = getSizeOfDimension(inputShape, 2); \
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index cb52ccf..0cfc9c9 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -22,26 +22,6 @@
namespace android {
namespace nn {
-bool fullyConnectedPrepare(const Shape& input,
- const Shape& weights,
- const Shape& bias,
- Shape* output) {
- // Check all the parameters of tensor match within themselves and match the
- // input configuration.
- uint32_t input_size = getNumberOfElements(input);
- uint32_t num_units = getSizeOfDimension(weights, 0);
- uint32_t batch_size = input_size / getSizeOfDimension(weights, 1);
-
- DCHECK_EQ(getSizeOfDimension(bias, 0), num_units);
- DCHECK_EQ(getSizeOfDimension(weights, 1) * batch_size, input_size);
- DCHECK_EQ(getNumberOfDimensions(weights), 2);
-
- output->type = input.type;
- output->dimensions = {batch_size, num_units};
-
- return true;
-}
-
bool fullyConnectedFloat32(const float* inputData, const Shape& inputShape,
const float* weightsData, const Shape& weightsShape,
const float* biasData, const Shape& biasShape,
diff --git a/common/operations/Normalization.cpp b/common/operations/Normalization.cpp
index d19429d..cd742dc 100644
--- a/common/operations/Normalization.cpp
+++ b/common/operations/Normalization.cpp
@@ -22,11 +22,6 @@
namespace android {
namespace nn {
-bool genericNormalizationPrepare(const Shape& input, Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- return SetShape(input, output);
-}
-
bool l2normFloat32(const float* inputData, const Shape& inputShape,
float* outputData, const Shape& outputShape) {
optimized_ops::L2Normalization<FusedActivationFunctionType::kNone>(
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp
index 2ccae56..0d29c7a 100644
--- a/common/operations/Pooling.cpp
+++ b/common/operations/Pooling.cpp
@@ -22,31 +22,6 @@
namespace android {
namespace nn {
-bool genericPoolingPrepare(const Shape& input,
- int32_t padding_left, int32_t padding_right,
- int32_t padding_top, int32_t padding_bottom,
- int32_t stride_width, int32_t stride_height,
- int32_t filter_width, int32_t filter_height,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- DCHECK_EQ(stride_width, stride_height);
-
- uint32_t batches = getSizeOfDimension(input, 0);
- uint32_t width = getSizeOfDimension(input, 2);
- uint32_t height = getSizeOfDimension(input, 1);
- uint32_t channels_out = getSizeOfDimension(input, 3);
-
- uint32_t outWidth = computeOutSize(width, filter_width, stride_width,
- padding_left, padding_right);
- uint32_t outHeight = computeOutSize(height, filter_height, stride_height,
- padding_top, padding_bottom);
-
- output->type = input.type;
- output->dimensions = {batches, outHeight, outWidth, channels_out};
- return true;
-}
-
-
#define ANDROID_NN_POOLING_PARAMETERS \
uint32_t height = getSizeOfDimension(inputShape, 1); \
uint32_t width = getSizeOfDimension(inputShape, 2); \
diff --git a/common/operations/Reshape.cpp b/common/operations/Reshape.cpp
index 3d36e1e..6c46965 100644
--- a/common/operations/Reshape.cpp
+++ b/common/operations/Reshape.cpp
@@ -26,45 +26,6 @@
namespace android {
namespace nn {
-bool reshapePrepare(const Shape& input,
- const int32_t* targetDims,
- const int32_t targetDimsSize,
- Shape* output) {
- // Reshape allows one of the targetDims components to have the
- // special -1 value, meaning it will be calculated automatically based on the
- // input. Here we calculate what that dimension should be so that the number
- // of output elements in the same as the number of input elements.
- int32_t numInputElements = (int32_t) getNumberOfElements(input);
-
- std::vector<uint32_t> outDims(targetDimsSize);
- int32_t numOutputElements = 1;
- int32_t strechDim = -1;
- for (int32_t i = 0; i < targetDimsSize; ++i) {
- int32_t value = targetDims[i];
- if (value == -1) {
- DCHECK_EQ(strechDim, -1);
- strechDim = i;
- } else {
- numOutputElements *= value;
- outDims[i] = (uint32_t)value;
- }
- }
- if (strechDim != -1) {
- int32_t strechValue = numInputElements / numOutputElements;
- outDims[strechDim] = (uint32_t) strechValue;
- numOutputElements *= strechValue;
- }
-
- DCHECK_EQ(numInputElements, numOutputElements);
-
- output->type = input.type;
- output->dimensions = outDims;
- output->offset = input.offset;
- output->scale = input.scale;
-
- return true;
-}
-
bool reshapeGeneric(const void* inputData, const Shape& inputShape,
void* outputData, const Shape& outputShape) {
size_t count = sizeOfData(inputShape.type, inputShape.dimensions);
@@ -72,20 +33,6 @@
return true;
}
-bool resizeBilinearPrepare(const Shape& input,
- int32_t width,
- int32_t height,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- uint32_t batches = getSizeOfDimension(input, 0);
- uint32_t channels = getSizeOfDimension(input, 3);
-
- output->type = input.type;
- output->dimensions = {batches, (uint32_t)height, (uint32_t)width, channels};
-
- return true;
-}
-
bool resizeBilinearFloat32(const float* inputData, const Shape& inputShape,
float* outputData, const Shape& outputShape) {
int32_t height = (int32_t) getSizeOfDimension(outputShape, 1);
@@ -103,29 +50,6 @@
return true;
}
-bool depthToSpacePrepare(const Shape& input,
- int32_t blockSize,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- DCHECK_GT(blockSize, 0);
-
- uint32_t batches = getSizeOfDimension(input, 0);
- uint32_t height = getSizeOfDimension(input, 1);
- uint32_t width = getSizeOfDimension(input, 2);
- uint32_t channels = getSizeOfDimension(input, 3);
-
- DCHECK_EQ(channels % (uint32_t)(blockSize * blockSize), 0);
- output->type = input.type;
- output->dimensions = {batches,
- height * blockSize,
- width * blockSize,
- channels / (blockSize * blockSize)};
- output->offset = input.offset;
- output->scale = input.scale;
-
- return true;
-}
-
bool depthToSpaceGeneric(const uint8_t* inputData, const Shape& inputShape,
int32_t blockSize,
uint8_t* outputData, const Shape& outputShape) {
@@ -150,31 +74,6 @@
return true;
}
-bool spaceToDepthPrepare(const Shape& input,
- int32_t blockSize,
- Shape* output) {
- DCHECK_EQ(getNumberOfDimensions(input), 4);
- DCHECK_GT(blockSize, 0);
-
- uint32_t batches = getSizeOfDimension(input, 0);
- uint32_t height = getSizeOfDimension(input, 1);
- uint32_t width = getSizeOfDimension(input, 2);
- uint32_t channels = getSizeOfDimension(input, 3);
-
- DCHECK_EQ(height % (uint32_t)blockSize, 0);
- DCHECK_EQ(width % (uint32_t)blockSize, 0);
-
- output->type = input.type;
- output->dimensions = {batches,
- height / blockSize,
- width / blockSize,
- channels * (blockSize * blockSize)};
- output->offset = input.offset;
- output->scale = input.scale;
-
- return true;
-}
-
bool spaceToDepthGeneric(const uint8_t* inputData, const Shape& inputShape,
int32_t blockSize,
uint8_t* outputData, const Shape& outputShape) {
diff --git a/common/operations/SimpleMath.cpp b/common/operations/SimpleMath.cpp
index c23c34a..31638b6 100644
--- a/common/operations/SimpleMath.cpp
+++ b/common/operations/SimpleMath.cpp
@@ -25,39 +25,6 @@
namespace android {
namespace nn {
-
-bool addMulPrepare(const Shape& in1, const Shape& in2, Shape* out) {
- if (getNumberOfDimensions(in1) > 4 || getNumberOfDimensions(in2) > 4) {
- LOG(ERROR) << "Only supports upto 4D tensors.";
- return false;
- }
- if (SameShape(in1, in2)) {
- return SetShape(in1, out);
- } else {
- // BroadcastAdd needed
- uint32_t numberOfDims1 = getNumberOfDimensions(in1);
- uint32_t numberOfDims2 = getNumberOfDimensions(in2);
- uint32_t maxDims = std::max(numberOfDims1, numberOfDims2);
- out->dimensions = std::vector<uint32_t>(maxDims);
- for (uint32_t i = 1; i <= maxDims; i++) {
- uint32_t dim1 = 1;
- if (i <= numberOfDims1) {
- dim1 = getSizeOfDimension(in1, numberOfDims1 - i);
- }
- uint32_t dim2 = 1;
- if (i <= numberOfDims2) {
- dim2 = getSizeOfDimension(in2, numberOfDims2 - i);
- }
- if (dim1 != dim2 && dim1 != 1 && dim2 != 1) {
- LOG(ERROR) << "Dimensions mismatch for BroadcastAdd";
- return false;
- }
- out->dimensions[maxDims - i] = std::max(dim1, dim2);
- }
- }
- return true;
-}
-
bool addFloat32(const float* in1, const Shape& shape1,
const float* in2, const Shape& shape2,
int32_t activation,
@@ -118,10 +85,6 @@
return true;
}
-bool floorPrepare(const Shape& input, Shape* output) {
- return SetShape(input, output);
-}
-
bool floorFloat32(const float* inputData,
float* outputData,
const Shape& shape) {
@@ -130,15 +93,6 @@
return true;
}
-bool dequantizePrepare(const Shape& input, Shape* output) {
- if (input.type != OperandType::TENSOR_QUANT8_ASYMM ||
- output->type != OperandType::TENSOR_FLOAT32) {
- LOG(ERROR) << "bad input / output operand type.";
- return false;
- }
- return SetShape(input, output);
-}
-
bool dequantizeQuant8ToFloat32(const uint8_t* inputData,
float* outputData,
const Shape& shape) {