Migrate NNAPI runtime to canonical types
This change replaces most uses of HAL types in the codebase with
their canonical equivalents. Follow-up changes will introduce
further refactorings.

Also removes the unused files nn/runtime/test/Bridge.{h,cpp}.
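
The per-file migration is mechanical. A representative sketch
(illustrative only, with made-up names; not a hunk from this change):

    // Before: HAL types pulled in via HalInterfaces.h
    #include "HalInterfaces.h"
    using namespace hal;  // hal::Operation, hal::OperandType, ...
    LOG(ERROR) << "Unsupported type: " << toString(inputType);
    auto dim = shape.extraParams.channelQuant().channelDim;

    // After: canonical types from nnapi/Types.h
    #include "nnapi/Types.h"
    // Canonical types stream directly; no toString() needed.
    LOG(ERROR) << "Unsupported type: " << inputType;
    // extraParams is a std::variant in the canonical types.
    auto dim = std::get<Operand::SymmPerChannelQuantParams>(
                       shape.extraParams)
                       .channelDim;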
Bug: 160669906
Fix: 155923931
Test: NeuralNetworksTest_static (all 7 passes)
Test: NeuralNetworksTest_operations
Test: NeuralNetworksTest_utils
Test: NeuralNetworksTest_logtag
Test: nnCache_test
Test: BlobCache_test
Change-Id: I63fa286e926a096948f1b1b172d1d562c4f52f29
Merged-In: I63fa286e926a096948f1b1b172d1d562c4f52f29
(cherry picked from commit daa4b515bc15a2ac7755f0666c023d7e3caa951a)
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index ff5a55d..c0a1934 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -28,7 +28,6 @@
#include "ActivationFunctor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -36,8 +35,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
namespace activation {
constexpr uint32_t kNumInputs = 1;
@@ -373,7 +370,7 @@
} else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
diff --git a/common/operations/ArgMinMax.cpp b/common/operations/ArgMinMax.cpp
index f53ba47..2ee413c 100644
--- a/common/operations/ArgMinMax.cpp
+++ b/common/operations/ArgMinMax.cpp
@@ -19,7 +19,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -27,8 +26,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
template <typename In, typename Out>
static void argMinMaxImpl(const In* inputData, const Shape& inputShape, int32_t axis, bool isArgMin,
Out* outputData, const Shape& outputShape) {
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp
index 12ac43f..6cf095b 100644
--- a/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -23,7 +23,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -32,8 +31,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/BidirectionalSequenceLSTM.h b/common/operations/BidirectionalSequenceLSTM.h
index 184b65d..7077d3b 100644
--- a/common/operations/BidirectionalSequenceLSTM.h
+++ b/common/operations/BidirectionalSequenceLSTM.h
@@ -34,12 +34,11 @@
class BidirectionalSequenceLSTM {
public:
- BidirectionalSequenceLSTM(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ BidirectionalSequenceLSTM(const Operation& operation, RunTimeOperandInfo* operands);
- bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
- Shape* fwOutputShape, Shape* bwOutputShape, Shape* fwOutputActivationState,
- Shape* fwOutputCellState, Shape* bwOutputActivationState,
- Shape* bwOutputCellState);
+ bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* fwOutputShape,
+ Shape* bwOutputShape, Shape* fwOutputActivationState, Shape* fwOutputCellState,
+ Shape* bwOutputActivationState, Shape* bwOutputCellState);
bool Eval();
// Input Tensors of size {max_time, n_batch, n_input}
diff --git a/common/operations/BidirectionalSequenceRNN.cpp b/common/operations/BidirectionalSequenceRNN.cpp
index 98917c0..adacea0 100644
--- a/common/operations/BidirectionalSequenceRNN.cpp
+++ b/common/operations/BidirectionalSequenceRNN.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
@@ -61,8 +60,6 @@
namespace {
-using namespace hal;
-
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
@@ -327,7 +324,7 @@
OperandType inputType = context->getInputType(kInputTensor);
if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {
LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: "
- << toString(inputType);
+ << inputType;
return false;
}
NN_RET_CHECK(validateInputTypes(
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp
index 17094af..67bb914 100644
--- a/common/operations/Broadcast.cpp
+++ b/common/operations/Broadcast.cpp
@@ -29,16 +29,14 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace broadcast {
constexpr uint32_t kNumInputs = 3;
@@ -53,16 +51,16 @@
#define ANDROID_NN_MACRO_DISPATCH(macro) \
switch (activation) { \
- case (int32_t)FusedActivationFunc::NONE: \
+ case static_cast<int32_t>(FusedActivationFunc::NONE): \
macro(kNone); \
break; \
- case (int32_t)FusedActivationFunc::RELU: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU): \
macro(kRelu); \
break; \
- case (int32_t)FusedActivationFunc::RELU1: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU1): \
macro(kRelu1); \
break; \
- case (int32_t)FusedActivationFunc::RELU6: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU6): \
macro(kRelu6); \
break; \
default: \
@@ -464,7 +462,7 @@
inputType == OperandType::TENSOR_INT32) {
NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_3, opIntroducedAt)));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
const Shape& input1 = context->getInputShape(kInputTensor1);
const Shape& input2 = context->getInputShape(kInputTensor2);
diff --git a/common/operations/Cast.cpp b/common/operations/Cast.cpp
index 77e35af..aef3baf 100644
--- a/common/operations/Cast.cpp
+++ b/common/operations/Cast.cpp
@@ -20,7 +20,6 @@
#include <algorithm>
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -30,8 +29,6 @@
namespace {
-using namespace hal;
-
template <typename FromT, typename ToT>
void copyCast(const FromT* in, ToT* out, int numElements) {
std::transform(in, in + numElements, out, [](FromT a) -> ToT {
diff --git a/common/operations/ChannelShuffle.cpp b/common/operations/ChannelShuffle.cpp
index 7abf224..779a8d8 100644
--- a/common/operations/ChannelShuffle.cpp
+++ b/common/operations/ChannelShuffle.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -25,8 +24,6 @@
namespace nn {
namespace channel_shuffle {
-using namespace hal;
-
constexpr char kOperationName[] = "CHANNEL_SHUFFLE";
constexpr uint32_t kNumInputs = 3;
diff --git a/common/operations/Comparisons.cpp b/common/operations/Comparisons.cpp
index a8f8622..50ed806 100644
--- a/common/operations/Comparisons.cpp
+++ b/common/operations/Comparisons.cpp
@@ -19,7 +19,6 @@
#include <functional>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename DataType, typename ComparisonType>
bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData,
const Shape& aShape, const DataType* bData, const Shape& bShape, bool8* outputData,
@@ -135,7 +132,7 @@
inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for comparison op: " << toString(inputType);
+ << "Unsupported input operand type for comparison op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));
NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8}));
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp
index 08c9c61..6de5bad 100644
--- a/common/operations/Concatenation.cpp
+++ b/common/operations/Concatenation.cpp
@@ -27,7 +27,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool concatenation(const std::vector<const T*>& inputDataPtrs,
const std::vector<Shape>& inputShapes, int32_t axis, T* outputData,
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index f34e908..5b7d8d0 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -26,7 +26,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "OperationsUtils.h"
@@ -49,8 +48,6 @@
namespace {
-using namespace hal;
-
// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
@@ -566,7 +563,9 @@
OperandType::INT32};
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
0)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -727,7 +726,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
@@ -758,7 +759,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 32e8b55..47bf010 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -42,8 +42,6 @@
namespace {
-using namespace hal;
-
struct DepthwiseConv2dParam {
int32_t padding_left, padding_right;
int32_t padding_top, padding_bottom;
@@ -443,7 +441,9 @@
filterType == inputType)
<< "Unsupported filter tensor type for operation " << kOperationName;
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
3)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -607,7 +607,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
@@ -639,7 +641,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
diff --git a/common/operations/Dequantize.cpp b/common/operations/Dequantize.cpp
index 2fb2d5c..7b81143 100644
--- a/common/operations/Dequantize.cpp
+++ b/common/operations/Dequantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -33,8 +32,6 @@
namespace {
-using namespace hal;
-
template <typename InputType, typename OutputType>
bool compute(const InputType* inputData, const Shape& inputShape, OutputType* outputData) {
const int numElements = getNumberOfElements(inputShape);
@@ -52,7 +49,8 @@
// First we calculate a stride which is the number of elements we need to
// skip to change an index along a dimension with different quantization
// scales.
- const int channelDim = inputShape.extraParams.channelQuant().channelDim;
+ const int channelDim =
+ std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams).channelDim;
int stride = 1;
for (int i = getNumberOfDimensions(inputShape) - 1; i > channelDim; --i) {
stride *= getSizeOfDimension(inputShape, i);
@@ -67,7 +65,8 @@
// size of the dimension (so that we don't have an overflow if the
// channelDim is not 0).
const int scaleIndex = (i / stride) % getSizeOfDimension(inputShape, channelDim);
- const float scale = inputShape.extraParams.channelQuant().scales[scaleIndex];
+ const float scale = std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams)
+ .scales[scaleIndex];
const int32_t value = inputData[i];
outputData[i] = static_cast<OutputType>(scale * (value - zeroPoint));
}
@@ -97,10 +96,10 @@
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
inputType == OperandType::TENSOR_QUANT8_SYMM ||
inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)
- << "Unsupported input operand type for DEQUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for DEQUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||
outputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported output operand type for DEQUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for DEQUANTIZE op: " << outputType;
return validateHalVersion(context, HalVersion::V1_2);
}
@@ -155,7 +154,7 @@
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for dequantize op. (input type: "
- << toString(inputType) << " output type: " << toString(outputType) << ")";
+ << inputType << " output type: " << outputType << ")";
}
} // namespace dequantize
diff --git a/common/operations/Elementwise.cpp b/common/operations/Elementwise.cpp
index 82a2687..3ddae90 100644
--- a/common/operations/Elementwise.cpp
+++ b/common/operations/Elementwise.cpp
@@ -18,7 +18,6 @@
#include <cmath>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -35,8 +34,6 @@
namespace {
-using namespace hal;
-
template <typename IntermediateType, typename T>
inline bool compute(IntermediateType func(IntermediateType), const T* input, const Shape& shape,
T* output) {
diff --git a/common/operations/Elu.cpp b/common/operations/Elu.cpp
index 07304e7..dfb221c 100644
--- a/common/operations/Elu.cpp
+++ b/common/operations/Elu.cpp
@@ -20,7 +20,6 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -30,8 +29,6 @@
namespace nn {
namespace elu {
-using namespace hal;
-
constexpr uint32_t kNumInputs = 2;
constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kAlphaScalar = 1;
diff --git a/common/operations/EmbeddingLookup.cpp b/common/operations/EmbeddingLookup.cpp
index 12e4a65..5ff26e8 100644
--- a/common/operations/EmbeddingLookup.cpp
+++ b/common/operations/EmbeddingLookup.cpp
@@ -19,7 +19,6 @@
#include "EmbeddingLookup.h"
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -27,8 +26,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
EmbeddingLookup::EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands) {
value_ = GetInput(operation, operands, kValueTensor);
lookup_ = GetInput(operation, operands, kLookupTensor);
diff --git a/common/operations/EmbeddingLookup.h b/common/operations/EmbeddingLookup.h
index 9a82dda..0388b35 100644
--- a/common/operations/EmbeddingLookup.h
+++ b/common/operations/EmbeddingLookup.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -28,7 +28,7 @@
class EmbeddingLookup {
public:
- EmbeddingLookup(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands);
bool Eval();
diff --git a/common/operations/Fill.cpp b/common/operations/Fill.cpp
index a6b3906..a233627 100644
--- a/common/operations/Fill.cpp
+++ b/common/operations/Fill.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
namespace android {
@@ -33,8 +32,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool executeTyped(IOperationExecutionContext* context) {
T* output = context->getOutputBuffer<T>(kOutputTensor);
@@ -58,7 +55,7 @@
*valueType = OperandType::INT32;
return true;
default:
- NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << toString(outputType);
+ NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << outputType;
}
}
@@ -73,7 +70,7 @@
NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||
outputType == OperandType::TENSOR_FLOAT32 ||
outputType == OperandType::TENSOR_INT32)
- << "Unsupported output type for fill op: " << toString(outputType);
+ << "Unsupported output type for fill op: " << outputType;
NN_RET_CHECK(validateOutputTypes(context, {outputType}));
OperandType valueType;
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index 9bdd0ba..9fcc072 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -45,8 +44,6 @@
namespace {
-using namespace hal;
-
// executionMutex is used to protect concurrent access of non-threadsafe resources
// like gemmlowp::GemmContext.
// std::mutex is safe for pthreads on Android.
diff --git a/common/operations/Gather.cpp b/common/operations/Gather.cpp
index d496d6a..e73a22e 100644
--- a/common/operations/Gather.cpp
+++ b/common/operations/Gather.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis,
const int32_t* indicesData, const Shape& indicesShape, T* outputData) {
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp
index 4e3aa3f..2ef733e 100644
--- a/common/operations/GenerateProposals.cpp
+++ b/common/operations/GenerateProposals.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -35,8 +34,6 @@
namespace {
-using namespace hal;
-
struct BoxEncodingCorner {
float x1, y1, x2, y2;
};
diff --git a/common/operations/HashtableLookup.cpp b/common/operations/HashtableLookup.cpp
index 287c866..cfb9d98 100644
--- a/common/operations/HashtableLookup.cpp
+++ b/common/operations/HashtableLookup.cpp
@@ -19,7 +19,6 @@
#include "HashtableLookup.h"
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -29,8 +28,6 @@
namespace {
-using namespace hal;
-
int greater(const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
}
diff --git a/common/operations/HashtableLookup.h b/common/operations/HashtableLookup.h
index c0921e0..1ae554f 100644
--- a/common/operations/HashtableLookup.h
+++ b/common/operations/HashtableLookup.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -28,7 +28,7 @@
class HashtableLookup {
public:
- HashtableLookup(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ HashtableLookup(const Operation& operation, RunTimeOperandInfo* operands);
bool Eval();
diff --git a/common/operations/HeatmapMaxKeypoint.cpp b/common/operations/HeatmapMaxKeypoint.cpp
index 3608ca5..a07e142 100644
--- a/common/operations/HeatmapMaxKeypoint.cpp
+++ b/common/operations/HeatmapMaxKeypoint.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -44,8 +43,6 @@
namespace {
-using namespace hal;
-
// This function uses Taylor expansion up to the quadratic term to approximate bicubic
// upscaling result.
// 2nd order Taylor expansion: D(x) = D - b'x + 1/2 * x'Ax
diff --git a/common/operations/InstanceNormalization.cpp b/common/operations/InstanceNormalization.cpp
index 75b907b..0ce21d0 100644
--- a/common/operations/InstanceNormalization.cpp
+++ b/common/operations/InstanceNormalization.cpp
@@ -20,7 +20,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool instanceNormNhwc(const T* inputData, const Shape& inputShape, T gamma, T beta,
T epsilon, T* outputData, const Shape& outputShape) {
diff --git a/common/operations/L2Normalization.cpp b/common/operations/L2Normalization.cpp
index 1f0c9d0..f86ab80 100644
--- a/common/operations/L2Normalization.cpp
+++ b/common/operations/L2Normalization.cpp
@@ -23,7 +23,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis,
float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("l2normFloat32");
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index bdb106e..14d7a79 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -18,19 +18,18 @@
#include "LSHProjection.h"
+#include <utils/hash/farmhash.h>
+
+#include <memory>
+
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
#include "Utils.h"
-
-#include <utils/hash/farmhash.h>
-#include <memory>
+#include "nnapi/Types.h"
namespace android {
namespace nn {
-using namespace hal;
-
LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* operands) {
input_ = GetInput(operation, operands, kInputTensor);
weight_ = GetInput(operation, operands, kWeightTensor);
@@ -112,7 +111,7 @@
int64_t hash_signature = farmhash::Fingerprint64(key.get(), key_bytes);
double running_value = static_cast<double>(hash_signature);
input_ptr += input_item_bytes;
- if (weight->lifetime == OperandLifeTime::NO_VALUE) {
+ if (weight->lifetime == Operand::LifeTime::NO_VALUE) {
score += running_value;
} else {
score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
diff --git a/common/operations/LSHProjection.h b/common/operations/LSHProjection.h
index 520f58a..3a953a0 100644
--- a/common/operations/LSHProjection.h
+++ b/common/operations/LSHProjection.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -36,9 +36,9 @@
class LSHProjection {
public:
- LSHProjection(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ LSHProjection(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape);
template <typename T>
bool Eval();
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp
index 3051cfd..e64d0c4 100644
--- a/common/operations/LSTM.cpp
+++ b/common/operations/LSTM.cpp
@@ -22,18 +22,16 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include "Tracing.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
@@ -113,7 +111,7 @@
} else {
// For LSTM from HAL v1.0 assign operands with no values
static RunTimeOperandInfo no_value;
- no_value.lifetime = OperandLifeTime::NO_VALUE;
+ no_value.lifetime = Operand::LifeTime::NO_VALUE;
input_layer_norm_weights_ = &no_value;
forget_layer_norm_weights_ = &no_value;
@@ -221,8 +219,8 @@
// omitted ones can be omitted in case CIFG LSTM is used.
params->use_layer_norm = !IsNullInput(output_layer_norm_weights);
- params->use_projection_weight = (projection_weights->lifetime != OperandLifeTime::NO_VALUE);
- params->use_projection_bias = (projection_bias->lifetime != OperandLifeTime::NO_VALUE);
+ params->use_projection_weight = (projection_weights->lifetime != Operand::LifeTime::NO_VALUE);
+ params->use_projection_bias = (projection_bias->lifetime != Operand::LifeTime::NO_VALUE);
// Make sure the input gate bias is present only when not a CIFG-LSTM.
if (params->use_cifg) {
diff --git a/common/operations/LSTM.h b/common/operations/LSTM.h
index b48c3df..dc6a43c 100644
--- a/common/operations/LSTM.h
+++ b/common/operations/LSTM.h
@@ -24,7 +24,7 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -48,9 +48,9 @@
class LSTMCell {
public:
- LSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ LSTMCell(const Operation& operation, RunTimeOperandInfo* operands);
- bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape,
+ bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape,
Shape* outputStateShape, Shape* cellStateShape, Shape* outputShape);
bool Eval();
diff --git a/common/operations/LocalResponseNormalization.cpp b/common/operations/LocalResponseNormalization.cpp
index 40220e1..26a7a00 100644
--- a/common/operations/LocalResponseNormalization.cpp
+++ b/common/operations/LocalResponseNormalization.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -45,8 +44,6 @@
namespace {
-using namespace hal;
-
inline bool localResponseNormFloat32Impl(const float* inputData, const Shape& inputShape,
int32_t radius, float bias, float alpha, float beta,
int32_t axis, float* outputData,
diff --git a/common/operations/LogSoftmax.cpp b/common/operations/LogSoftmax.cpp
index 4132ef9..fdcccf8 100644
--- a/common/operations/LogSoftmax.cpp
+++ b/common/operations/LogSoftmax.cpp
@@ -16,19 +16,18 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
+#include <algorithm>
+#include <cmath>
+#include <vector>
+
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
-#include <cmath>
-
namespace android {
namespace nn {
namespace log_softmax {
-using namespace hal;
-
constexpr char kOperationName[] = "LOG_SOFTMAX";
constexpr uint32_t kNumInputs = 3;
diff --git a/common/operations/LogicalAndOr.cpp b/common/operations/LogicalAndOr.cpp
index 6ada724..9d7e5ce 100644
--- a/common/operations/LogicalAndOr.cpp
+++ b/common/operations/LogicalAndOr.cpp
@@ -16,7 +16,9 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
+#include <functional>
+#include <vector>
+
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -34,8 +36,6 @@
namespace {
-using namespace hal;
-
bool compute(const std::function<bool(bool, bool)>& func, const bool8* aData, const Shape& aShape,
const bool8* bData, const Shape& bShape, bool8* outputData, const Shape& outputShape) {
IndexedShapeWrapper aShapeIndexed(aShape);
diff --git a/common/operations/LogicalNot.cpp b/common/operations/LogicalNot.cpp
index 8b41813..c715388 100644
--- a/common/operations/LogicalNot.cpp
+++ b/common/operations/LogicalNot.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -32,8 +31,6 @@
namespace {
-using namespace hal;
-
bool compute(const bool8* input, const Shape& shape, bool8* output) {
const auto size = getNumberOfElements(shape);
for (uint32_t i = 0; i < size; ++i) {
diff --git a/common/operations/MaximumMinimum.cpp b/common/operations/MaximumMinimum.cpp
index 91a4bb0..339172f 100644
--- a/common/operations/MaximumMinimum.cpp
+++ b/common/operations/MaximumMinimum.cpp
@@ -20,7 +20,6 @@
#include <vector>
#include "MaximumMinimum.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -31,8 +30,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* aData, const Shape& aShape, const T* bData, const Shape& bShape,
bool isMinimum, T* outputData, const Shape& outputShape) {
@@ -124,7 +121,7 @@
reinterpret_cast<int8_t*>(output), outputShape);
}
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(shape1.type);
+ LOG(ERROR) << "Unsupported data type: " << shape1.type;
return false;
}
}
diff --git a/common/operations/Multinomial.cpp b/common/operations/Multinomial.cpp
index 7e1d2c6..80fb7e8 100644
--- a/common/operations/Multinomial.cpp
+++ b/common/operations/Multinomial.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
#include "guarded_philox_random.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/Multinomial.h b/common/operations/Multinomial.h
index 0f5434e..bdfe587 100644
--- a/common/operations/Multinomial.h
+++ b/common/operations/Multinomial.h
@@ -23,7 +23,7 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -33,9 +33,9 @@
class Multinomial {
public:
- Multinomial(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ Multinomial(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape);
bool Eval();
diff --git a/common/operations/MultinomialTest.cpp b/common/operations/MultinomialTest.cpp
index e34de63..668ed36 100644
--- a/common/operations/MultinomialTest.cpp
+++ b/common/operations/MultinomialTest.cpp
@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#include "Multinomial.h"
+#include <gmock/gmock-matchers.h>
+#include <gtest/gtest.h>
-#include "HalInterfaces.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+#include <vector>
+
+#include "Multinomial.h"
#include "NeuralNetworksWrapper.h"
#include "philox_random.h"
#include "simple_philox.h"
-#include <gmock/gmock-matchers.h>
-#include <gtest/gtest.h>
-#include <unsupported/Eigen/CXX11/Tensor>
-
namespace android {
namespace nn {
namespace wrapper {
diff --git a/common/operations/Neg.cpp b/common/operations/Neg.cpp
index 48d962c..bf21727 100644
--- a/common/operations/Neg.cpp
+++ b/common/operations/Neg.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool compute(const T* input, const Shape& shape, T* output) {
const auto size = getNumberOfElements(shape);
diff --git a/common/operations/PRelu.cpp b/common/operations/PRelu.cpp
index a799a84..7e0c8c3 100644
--- a/common/operations/PRelu.cpp
+++ b/common/operations/PRelu.cpp
@@ -19,7 +19,6 @@
#include <algorithm>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -31,8 +30,6 @@
namespace nn {
namespace prelu {
-using namespace hal;
-
constexpr char kOperationName[] = "PRELU";
constexpr uint32_t kNumInputs = 2;
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp
index 3ffa70f..62594c7 100644
--- a/common/operations/Pooling.cpp
+++ b/common/operations/Pooling.cpp
@@ -22,15 +22,12 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace pooling {
constexpr uint32_t kInputTensor = 0;
@@ -334,8 +331,7 @@
OperandType::INT32,
};
} else {
- NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << opType;
}
if (inputCount >= 10) {
diff --git a/common/operations/Pow.cpp b/common/operations/Pow.cpp
index 40c4adf..03892a2 100644
--- a/common/operations/Pow.cpp
+++ b/common/operations/Pow.cpp
@@ -17,11 +17,11 @@
#define LOG_TAG "Operations"
#include "Pow.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include <cmath>
+#include <vector>
namespace android {
namespace nn {
@@ -29,8 +29,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* baseData, const Shape& baseShape, const T* exponentData,
const Shape& exponentShape, T* outputData, const Shape& outputShape) {
@@ -81,7 +79,7 @@
reinterpret_cast<float*>(outputData), outputShape);
} break;
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(baseShape.type);
+ LOG(ERROR) << "Unsupported data type: " << baseShape.type;
return false;
}
}
diff --git a/common/operations/QLSTM.cpp b/common/operations/QLSTM.cpp
index 3b2dd05..68a9489 100644
--- a/common/operations/QLSTM.cpp
+++ b/common/operations/QLSTM.cpp
@@ -101,8 +101,6 @@
} // namespace
-using hal::OperandType;
-
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
diff --git a/common/operations/Quantize.cpp b/common/operations/Quantize.cpp
index fa04bdd..943a33d 100644
--- a/common/operations/Quantize.cpp
+++ b/common/operations/Quantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {
NNTRACE_COMP("quantizeToQuant8");
@@ -75,10 +72,10 @@
NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
inputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported input operand type for QUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for QUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM ||
outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported output operand type for QUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for QUANTIZE op: " << outputType;
if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
return validateHalVersion(context, HalVersion::V1_3);
} else {
@@ -121,8 +118,7 @@
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. (input type: "
- << toString(inputType)
- << " output type: " << toString(context->getOutputType(kOutputTensor))
+ << inputType << " output type: " << context->getOutputType(kOutputTensor)
<< ")";
}
diff --git a/common/operations/QuantizedLSTM.cpp b/common/operations/QuantizedLSTM.cpp
index e059026..f07bc0a 100644
--- a/common/operations/QuantizedLSTM.cpp
+++ b/common/operations/QuantizedLSTM.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
@@ -34,8 +33,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/QuantizedLSTM.h b/common/operations/QuantizedLSTM.h
index 76e74c6..61963c0 100644
--- a/common/operations/QuantizedLSTM.h
+++ b/common/operations/QuantizedLSTM.h
@@ -28,9 +28,9 @@
class QuantizedLSTMCell {
public:
- QuantizedLSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ QuantizedLSTMCell(const Operation& operation, RunTimeOperandInfo* operands);
- static bool prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* cellStateShape, Shape* outputShape);
bool eval();
diff --git a/common/operations/RNN.cpp b/common/operations/RNN.cpp
index 259c091..f584f0e 100644
--- a/common/operations/RNN.cpp
+++ b/common/operations/RNN.cpp
@@ -22,15 +22,12 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
RNN::RNN(const Operation& operation, RunTimeOperandInfo* operands) {
NNTRACE_TRANS("RNN::RNN");
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/common/operations/RNN.h b/common/operations/RNN.h
index 245eb1d..0a5765b 100644
--- a/common/operations/RNN.h
+++ b/common/operations/RNN.h
@@ -20,7 +20,7 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -30,9 +30,9 @@
class RNN {
public:
- RNN(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ RNN(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* hiddenStateShape, Shape* outputShape);
bool Eval();
diff --git a/common/operations/Rank.cpp b/common/operations/Rank.cpp
index 5f74437..8a6931b 100644
--- a/common/operations/Rank.cpp
+++ b/common/operations/Rank.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Utils.h"
@@ -34,19 +33,19 @@
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
- hal::OperandType inputType = context->getInputType(kInputTensor);
- NN_RET_CHECK(inputType == hal::OperandType::TENSOR_FLOAT16 ||
- inputType == hal::OperandType::TENSOR_FLOAT32 ||
- inputType == hal::OperandType::TENSOR_INT32 ||
- inputType == hal::OperandType::TENSOR_QUANT8_ASYMM ||
- inputType == hal::OperandType::TENSOR_QUANT16_SYMM ||
- inputType == hal::OperandType::TENSOR_BOOL8 ||
- inputType == hal::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
- inputType == hal::OperandType::TENSOR_QUANT16_ASYMM ||
- inputType == hal::OperandType::TENSOR_QUANT8_SYMM ||
- inputType == hal::OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Incorrect input type for a RANK op: " << toString(inputType);
- NN_RET_CHECK(validateOutputTypes(context, {hal::OperandType::INT32}));
+ OperandType inputType = context->getInputType(kInputTensor);
+ NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
+ inputType == OperandType::TENSOR_FLOAT32 ||
+ inputType == OperandType::TENSOR_INT32 ||
+ inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+ inputType == OperandType::TENSOR_QUANT16_SYMM ||
+ inputType == OperandType::TENSOR_BOOL8 ||
+ inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+ inputType == OperandType::TENSOR_QUANT16_ASYMM ||
+ inputType == OperandType::TENSOR_QUANT8_SYMM ||
+ inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
+ << "Incorrect input type for a RANK op: " << inputType;
+ NN_RET_CHECK(validateOutputTypes(context, {OperandType::INT32}));
return validateHalVersion(context, HalVersion::V1_3);
}
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp
index 220a4dc..c56771c 100644
--- a/common/operations/Reduce.cpp
+++ b/common/operations/Reduce.cpp
@@ -22,7 +22,6 @@
#include <limits>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) {
const Shape inputShape = context->getInputShape(kInputTensor);
diff --git a/common/operations/ResizeImageOps.cpp b/common/operations/ResizeImageOps.cpp
index c33abaf..9042099 100644
--- a/common/operations/ResizeImageOps.cpp
+++ b/common/operations/ResizeImageOps.cpp
@@ -23,15 +23,12 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace resize_image {
constexpr uint32_t kNumInputs = 4;
@@ -178,7 +175,7 @@
} else if (opType == OperationType::RESIZE_NEAREST_NEIGHBOR) {
NN_RET_CHECK(numInputs >= kNumInputs && numInputs <= kNumInputs + kNumOptionalInputs);
} else {
- NN_RET_CHECK_FAIL() << "Unsupported operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported operation " << opType;
}
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
auto inputType = context->getInputType(kInputTensor);
@@ -188,7 +185,7 @@
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported tensor type for operation " << getOperationName(opType);
+ << "Unsupported tensor type for operation " << opType;
if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {
NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
}
@@ -258,7 +255,7 @@
static_cast<float>(inWidth) *
static_cast<float>(context->getInputValue<_Float16>(kOutputWidthParamScalar)));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << opType;
}
NN_RET_CHECK_GT(height, 0);
NN_RET_CHECK_GT(width, 0);
@@ -304,8 +301,7 @@
context->getOutputShape(kOutputTensor));
default:
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation "
- << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
}
diff --git a/common/operations/RoiAlign.cpp b/common/operations/RoiAlign.cpp
index b9daf45..01008cc 100644
--- a/common/operations/RoiAlign.cpp
+++ b/common/operations/RoiAlign.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -51,8 +50,6 @@
namespace {
-using namespace hal;
-
template <typename T_Input, typename T_Roi>
inline bool roiAlignNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/common/operations/RoiPooling.cpp b/common/operations/RoiPooling.cpp
index a4f8214..373669a 100644
--- a/common/operations/RoiPooling.cpp
+++ b/common/operations/RoiPooling.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -48,8 +47,6 @@
namespace {
-using namespace hal;
-
template <typename T_Input, typename T_Roi>
inline bool roiPoolingNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/common/operations/SVDF.cpp b/common/operations/SVDF.cpp
index 8314838..953e2a8 100644
--- a/common/operations/SVDF.cpp
+++ b/common/operations/SVDF.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include <algorithm>
#include <vector>
@@ -29,8 +28,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
SVDF::SVDF(const Operation& operation, RunTimeOperandInfo* operands) {
NNTRACE_TRANS("SVDF::SVDF");
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/common/operations/SVDF.h b/common/operations/SVDF.h
index ca9b54e..da18568 100644
--- a/common/operations/SVDF.h
+++ b/common/operations/SVDF.h
@@ -23,7 +23,7 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -38,10 +38,10 @@
class SVDF {
public:
- SVDF(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ SVDF(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
- Shape* stateShape, Shape* outputShape);
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* stateShape,
+ Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/common/operations/Select.cpp b/common/operations/Select.cpp
index 2026595..9105389 100644
--- a/common/operations/Select.cpp
+++ b/common/operations/Select.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -35,8 +34,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool compute(const bool8* conditionData, const Shape& conditionShape, const T* aData,
const Shape& aShape, const T* bData, const Shape& bShape, T* outputData,
@@ -78,7 +75,7 @@
inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for select op: " << toString(inputType);
+ << "Unsupported input operand type for select op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_BOOL8, inputType, inputType}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
return validateHalVersion(context, HalVersion::V1_2);
diff --git a/common/operations/Slice.cpp b/common/operations/Slice.cpp
index 3c4f2fa..1b5a493 100644
--- a/common/operations/Slice.cpp
+++ b/common/operations/Slice.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -37,8 +36,6 @@
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-using namespace hal;
-
namespace {
template <typename T>
diff --git a/common/operations/Softmax.cpp b/common/operations/Softmax.cpp
index a986390..bb85c0b 100644
--- a/common/operations/Softmax.cpp
+++ b/common/operations/Softmax.cpp
@@ -25,7 +25,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta,
int32_t axis, float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("softmaxFloatSlow32");
diff --git a/common/operations/Squeeze.cpp b/common/operations/Squeeze.cpp
index 276461d..d734550 100644
--- a/common/operations/Squeeze.cpp
+++ b/common/operations/Squeeze.cpp
@@ -20,7 +20,6 @@
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"
@@ -36,8 +35,6 @@
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-using namespace hal;
-
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
@@ -46,7 +43,7 @@
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for SQUEEZE op: " << toString(inputType);
+ << "Unsupported input operand type for SQUEEZE op: " << inputType;
HalVersion minSupportedHalVersion;
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/StridedSlice.cpp b/common/operations/StridedSlice.cpp
index 5ff5aec..3bb3a82 100644
--- a/common/operations/StridedSlice.cpp
+++ b/common/operations/StridedSlice.cpp
@@ -23,7 +23,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool compute(const T* inputData, const Shape& inputShape, const int32_t* beginData,
const int32_t* endData, const int32_t* stridesData, int32_t beginMask, int32_t endMask,
@@ -107,7 +104,7 @@
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for STRIDED_SLICE op: " << toString(inputType);
+ << "Unsupported input operand type for STRIDED_SLICE op: " << inputType;
HalVersion minSupportedHalVersion;
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/Tile.cpp b/common/operations/Tile.cpp
index 517d75e..af17df1 100644
--- a/common/operations/Tile.cpp
+++ b/common/operations/Tile.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include "Tile.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
@@ -29,8 +28,6 @@
namespace {
-using namespace hal;
-
template <typename T>
void CopyMultipleTimes(const T* in_data, int32_t in_size, int32_t multiplier, T* out_data) {
for (int i = 0; i < multiplier; ++i) {
diff --git a/common/operations/TopK_V2.cpp b/common/operations/TopK_V2.cpp
index e005b9a..9e4ceed 100644
--- a/common/operations/TopK_V2.cpp
+++ b/common/operations/TopK_V2.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -38,8 +37,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t k, T* valuesData,
int32_t* indicesData) {
@@ -85,7 +82,7 @@
inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for select op: " << toString(inputType);
+ << "Unsupported input operand type for select op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32}));
NN_RET_CHECK(validateOutputTypes(context, {inputType, OperandType::TENSOR_INT32}));
HalVersion minSupportedHalVersion = HalVersion::V1_2;
@@ -132,7 +129,7 @@
return executeTyped<int8_t>(context);
} break;
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(inputShape.type);
+ LOG(ERROR) << "Unsupported data type: " << inputShape.type;
return false;
}
}
diff --git a/common/operations/Transpose.cpp b/common/operations/Transpose.cpp
index ff70f9e..423b3de 100644
--- a/common/operations/Transpose.cpp
+++ b/common/operations/Transpose.cpp
@@ -19,7 +19,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
const Shape& permShape, T* outputData, const Shape& outputShape) {
diff --git a/common/operations/TransposeConv2D.cpp b/common/operations/TransposeConv2D.cpp
index d67a473..0ee5d04 100644
--- a/common/operations/TransposeConv2D.cpp
+++ b/common/operations/TransposeConv2D.cpp
@@ -25,7 +25,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
@@ -452,7 +449,9 @@
filterType == inputType)
<< "Unsupported filter tensor type for operation " << kOperationName;
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
0)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -570,7 +569,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param,
context->getOutputBuffer<uint8_t>(kOutputTensor),
@@ -595,7 +596,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param,
context->getOutputBuffer<int8_t>(kOutputTensor),
diff --git a/common/operations/UnidirectionalSequenceLSTM.cpp b/common/operations/UnidirectionalSequenceLSTM.cpp
index 03854f6..9a00e1f 100644
--- a/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -18,7 +18,6 @@
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "LSTM.h"
#include "OperationResolver.h"
@@ -88,8 +87,6 @@
namespace {
-using namespace hal;
-
inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) {
return context->getInputBuffer(tensor) != nullptr;
}
@@ -157,7 +154,7 @@
} else {
NN_RET_CHECK_FAIL()
<< "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_LSTM op: "
- << toString(inputType);
+ << inputType;
}
HalVersion minHalVersionSupported = HalVersion::V1_2;
if (context->getNumOutputs() == kNumOutputsWithState) {
diff --git a/common/operations/UnidirectionalSequenceRNN.cpp b/common/operations/UnidirectionalSequenceRNN.cpp
index 273b701..aa79739 100644
--- a/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/common/operations/UnidirectionalSequenceRNN.cpp
@@ -20,9 +20,9 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
@@ -44,8 +44,6 @@
namespace {
-using namespace hal;
-
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
@@ -135,7 +133,7 @@
OperandType inputType = context->getInputType(kInputTensor);
if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {
LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: "
- << toString(inputType);
+ << inputType;
return false;
}
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, inputType, inputType, inputType,