Migrate NNAPI runtime to canonical types
This change replaces most uses of HAL types in the codebase with
their canonical equivalents. Subsequent changes will introduce
further refactorings.
Also removes the unused files nn/runtime/test/Bridge.{h,cpp}.
Bug: 160669906
Fix: 155923931
Test: NeuralNetworksTest_static (all 7 passes)
Test: NeuralNetworksTest_operations
Test: NeuralNetworksTest_utils
Test: NeuralNetworksTest_logtag
Test: nnCache_test
Test: BlobCache_test
Change-Id: I63fa286e926a096948f1b1b172d1d562c4f52f29
Merged-In: I63fa286e926a096948f1b1b172d1d562c4f52f29
(cherry picked from commit daa4b515bc15a2ac7755f0666c023d7e3caa951a)
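The recurring pattern in the hunks below: HIDL safe unions become
std::variant, hidl_vec becomes std::vector, getDiscriminator() checks
become std::holds_alternative()/std::get(), and toString() calls become
ostream operator<< insertions. A minimal, self-contained sketch of the
variant pattern, using simplified stand-in types rather than the exact
canonical definitions from the neuralnetworks_types library added in the
Android.bp hunk:

    // Illustrative sketch only; "Memory" and "MemoryDomainToken" are
    // simplified stand-ins, not the exact NNAPI canonical definitions.
    #include <cstdint>
    #include <iostream>
    #include <variant>
    #include <vector>

    struct Memory {};                    // stand-in for a memory-backed pool
    using MemoryDomainToken = uint32_t;  // stand-in for a driver-managed token
    using MemoryPool = std::variant<Memory, MemoryDomainToken>;

    int main() {
        std::vector<MemoryPool> pools = {Memory{}, MemoryDomainToken{7}};
        for (const auto& pool : pools) {
            // Canonical analogue of the old
            // getDiscriminator() == hidl_discriminator::token check.
            if (std::holds_alternative<MemoryDomainToken>(pool)) {
                std::cout << "token pool #"
                          << std::get<MemoryDomainToken>(pool) << "\n";
            } else {
                std::cout << "memory-backed pool\n";
            }
        }
        return 0;
    }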
diff --git a/common/Android.bp b/common/Android.bp
index 202c69e..10f97a2 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -109,6 +109,12 @@
],
whole_static_libs: [
"libarect",
+ "neuralnetworks_types",
+ "neuralnetworks_utils_hal_1_0", // TODO(b/160669116): Remove VNDK dependencies.
+ "neuralnetworks_utils_hal_1_1",
+ "neuralnetworks_utils_hal_1_2",
+ "neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
],
cflags: [
"-DTF_LITE_DISABLE_X86_NEON",
@@ -204,6 +210,12 @@
whole_static_libs: [
"libarect",
"libtflite_kernel_utils",
+ "neuralnetworks_types",
+ "neuralnetworks_utils_hal_1_0", // TODO(b/160669116): Remove VNDK dependencies.
+ "neuralnetworks_utils_hal_1_1",
+ "neuralnetworks_utils_hal_1_2",
+ "neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
"philox_random",
],
static_libs: [
@@ -232,6 +244,12 @@
name: "neuralnetworks_utils_defaults",
host_supported: true,
vendor_available: true,
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.neuralnetworks",
+ "test_com.android.neuralnetworks",
+ ],
+ min_sdk_version: "30",
cflags: [
"-Wall",
"-Werror",
diff --git a/common/BufferTracker.cpp b/common/BufferTracker.cpp
index e6b8d94..cb2a326 100644
--- a/common/BufferTracker.cpp
+++ b/common/BufferTracker.cpp
@@ -28,11 +28,10 @@
#include "CpuExecutor.h"
#include "HalInterfaces.h"
#include "Utils.h"
+#include "nnapi/TypeUtils.h"
namespace android::nn {
-using namespace hal;
-
std::shared_ptr<ManagedBuffer> ManagedBuffer::create(uint32_t size,
std::set<PreparedModelRole> roles,
const Operand& operand) {
@@ -40,7 +39,7 @@
if (buffer == nullptr) {
return nullptr;
}
- if (isExtensionOperandType(operand.type)) {
+ if (isExtension(operand.type)) {
LOG(ERROR) << "ManagedBuffer cannot handle extension operands.";
return nullptr;
}
@@ -55,19 +54,18 @@
kOperandType(operand.type),
kInitialDimensions(operand.dimensions),
mUpdatedDimensions(operand.dimensions) {
- CHECK(!isExtensionOperandType(kOperandType));
+ CHECK(!isExtension(kOperandType));
}
ErrorStatus ManagedBuffer::validateRequest(uint32_t poolIndex, const Request& request,
- const IPreparedModel* preparedModel) const {
+ const V1_3::IPreparedModel* preparedModel) const {
CHECK_LT(poolIndex, request.pools.size());
- CHECK(request.pools[poolIndex].getDiscriminator() ==
- Request::MemoryPool::hidl_discriminator::token);
+ CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex]));
std::lock_guard<std::mutex> guard(mMutex);
bool usedAsInput = false, usedAsOutput = false;
for (uint32_t i = 0; i < request.inputs.size(); i++) {
- if (request.inputs[i].hasNoValue) continue;
+ if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
if (request.inputs[i].location.poolIndex != poolIndex) continue;
// Validate if the input role is specified during allocation.
if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
@@ -89,7 +87,7 @@
usedAsInput = true;
}
for (uint32_t i = 0; i < request.outputs.size(); i++) {
- if (request.outputs[i].hasNoValue) continue;
+ if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
if (request.outputs[i].location.poolIndex != poolIndex) continue;
if (usedAsInput || usedAsOutput) {
LOG(ERROR) << "ManagedBuffer::validateRequest -- using the same device memory for "
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 5dd41ad..4ca9709 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -40,13 +40,14 @@
#include "Operations.h"
#include "OperationsUtils.h"
#include "Tracing.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-namespace {
+using ::android::hidl::memory::V1_0::IMemory;
-using namespace hal;
+namespace {
class OperationExecutionContext : public IOperationExecutionContext {
DISALLOW_IMPLICIT_CONSTRUCTORS(OperationExecutionContext);
@@ -59,7 +60,7 @@
OperandType getInputType(uint32_t index) const override;
Shape getInputShape(uint32_t index) const override;
const void* getInputBuffer(uint32_t index) const override;
- const OperandExtraParams getInputExtraParams(uint32_t index) const override;
+ const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;
uint32_t getNumOutputs() const override;
OperandType getOutputType(uint32_t index) const override;
@@ -117,7 +118,7 @@
return getInputInfo(index)->buffer;
}
-const OperandExtraParams OperationExecutionContext::getInputExtraParams(uint32_t index) const {
+const Operand::ExtraParams& OperationExecutionContext::getInputExtraParams(uint32_t index) const {
return getInputInfo(index)->extraParams;
}
@@ -154,7 +155,7 @@
bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape, int* result) {
// For user-provided model output operands, the parameters must match the Shape
// calculated from the preparation step.
- if (info->lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (info->lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
if (info->type != shape.type) {
LOG(ERROR) << "Invalid type for model output";
*result = ANEURALNETWORKS_OP_FAILED;
@@ -191,7 +192,7 @@
// TODO(b/153081229): We bypass the overflow check on extension operands because we do not know
// the sizes of extension types.
- if (!isExtensionOperandType(info->type) &&
+ if (!isExtension(info->type) &&
nonExtensionOperandSizeOfDataOverflowsUInt32(info->type, info->dimensions)) {
LOG(ERROR) << "Operand data size overflows uint32_t";
*result = ANEURALNETWORKS_OP_FAILED;
@@ -199,9 +200,9 @@
}
// Allocate the buffer only if the combined dimension is fully specified
- if (info->buffer == nullptr && (info->lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- info->lifetime == OperandLifeTime::SUBGRAPH_OUTPUT)) {
- if (isExtensionOperandType(info->type)) {
+ if (info->buffer == nullptr && (info->lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ info->lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT)) {
+ if (isExtension(info->type)) {
LOG(ERROR) << "Cannot allocate a variable of an extension type";
*result = ANEURALNETWORKS_OP_FAILED;
return false;
@@ -232,21 +233,21 @@
}
bool OperationExecutionContext::isOmittedInput(uint32_t index) const {
- return getInputInfo(index)->lifetime == OperandLifeTime::NO_VALUE;
+ return getInputInfo(index)->lifetime == Operand::LifeTime::NO_VALUE;
}
bool OperationExecutionContext::isOmittedOutput(uint32_t index) const {
- return getOutputInfo(index)->lifetime == OperandLifeTime::NO_VALUE;
+ return getOutputInfo(index)->lifetime == Operand::LifeTime::NO_VALUE;
}
bool OperationExecutionContext::checkNoOmittedOperand() const {
for (uint32_t i = 0; i < operation->inputs.size(); i++) {
- NN_RET_CHECK(!isOmittedInput(i)) << getOperationName(operation->type) << " input operand "
- << i << " is required but missing.";
+ NN_RET_CHECK(!isOmittedInput(i))
+ << operation->type << " input operand " << i << " is required but missing.";
}
for (uint32_t i = 0; i < operation->outputs.size(); i++) {
- NN_RET_CHECK(!isOmittedOutput(i)) << getOperationName(operation->type) << " output operand "
- << i << " is required but missing.";
+ NN_RET_CHECK(!isOmittedOutput(i))
+ << operation->type << " output operand " << i << " is required but missing.";
}
return true;
}
@@ -256,9 +257,8 @@
if (isOmittedInput(i)) continue;
for (uint32_t j = 0; j < getInputInfo(i)->dimensions.size(); j++) {
NN_RET_CHECK_NE(getInputInfo(i)->dimensions[j], 0)
- << getOperationName(operation->type)
- << " does not support zero-sized tensor, but input " << i << " dimension " << j
- << " is 0.";
+ << operation->type << " does not support zero-sized tensor, but input " << i
+ << " dimension " << j << " is 0.";
}
}
return true;
@@ -273,8 +273,8 @@
// when the RunTimePoolInfo is destroyed or is assigned to.
class RunTimePoolInfo::RunTimePoolInfoImpl {
public:
- RunTimePoolInfoImpl(const hidl_memory& hidlMemory, uint8_t* buffer, const sp<IMemory>& memory,
- AHardwareBuffer* hardwareBuffer, uint32_t size);
+ RunTimePoolInfoImpl(const hardware::hidl_memory& hidlMemory, uint8_t* buffer,
+ const sp<IMemory>& memory, AHardwareBuffer* hardwareBuffer, uint32_t size);
// rule of five...
~RunTimePoolInfoImpl();
@@ -288,10 +288,10 @@
bool flush() const;
- const hidl_memory& getHidlMemory() const { return mHidlMemory; }
+ const hardware::hidl_memory& getHidlMemory() const { return mHidlMemory; }
private:
- const hidl_memory mHidlMemory; // always used
+ const hardware::hidl_memory mHidlMemory; // always used
uint8_t* const mBuffer = nullptr; // always used
const sp<IMemory> mMemory; // only used when hidlMemory.name() == "ashmem"
AHardwareBuffer*
@@ -299,7 +299,7 @@
const uint32_t mSize;
};
-RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(const hidl_memory& hidlMemory,
+RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(const hardware::hidl_memory& hidlMemory,
uint8_t* buffer,
const sp<IMemory>& memory,
AHardwareBuffer* hardwareBuffer,
@@ -352,8 +352,8 @@
// TODO: short term, make share memory mapping and updating a utility function.
// TODO: long term, implement mmap_fd as a hidl IMemory service.
-std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromHidlMemory(
- const hidl_memory& hidlMemory) {
+std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromMemory(const Memory& canonicalMemory) {
+ hardware::hidl_memory hidlMemory = convertToV1_0(canonicalMemory);
uint8_t* buffer = nullptr;
sp<IMemory> memory;
AHardwareBuffer* hardwareBuffer = nullptr;
@@ -423,8 +423,8 @@
}
RunTimePoolInfo RunTimePoolInfo::createFromExistingBuffer(uint8_t* buffer, uint32_t size) {
- const auto impl = std::make_shared<const RunTimePoolInfoImpl>(hidl_memory{}, buffer, nullptr,
- nullptr, size);
+ const auto impl = std::make_shared<const RunTimePoolInfoImpl>(hardware::hidl_memory{}, buffer,
+ nullptr, nullptr, size);
return {impl};
}
@@ -443,17 +443,17 @@
return mImpl->flush();
}
-const hidl_memory& RunTimePoolInfo::getHidlMemory() const {
- return mImpl->getHidlMemory();
+Memory RunTimePoolInfo::getMemory() const {
+ return uncheckedConvert(mImpl->getHidlMemory());
}
-bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
- const hidl_vec<hidl_memory>& pools) {
+bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,
+ const std::vector<Memory>& pools) {
CHECK(poolInfos != nullptr);
poolInfos->clear();
poolInfos->reserve(pools.size());
for (const auto& pool : pools) {
- if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromHidlMemory(pool)) {
+ if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromMemory(pool)) {
poolInfos->push_back(*poolInfo);
} else {
LOG(ERROR) << "Could not map pools";
@@ -465,18 +465,18 @@
}
bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos,
- const hidl_vec<Request::MemoryPool>& pools) {
+ const std::vector<Request::MemoryPool>& pools) {
CHECK(poolInfos != nullptr);
poolInfos->clear();
poolInfos->reserve(pools.size());
for (const auto& pool : pools) {
- if (pool.getDiscriminator() != Request::MemoryPool::hidl_discriminator::hidlMemory) {
+ if (!std::holds_alternative<Memory>(pool)) {
LOG(ERROR) << "Unknown memory token";
poolInfos->clear();
return false;
}
if (std::optional<RunTimePoolInfo> poolInfo =
- RunTimePoolInfo::createFromHidlMemory(pool.hidlMemory())) {
+ RunTimePoolInfo::createFromMemory(std::get<Memory>(pool))) {
poolInfos->push_back(*poolInfo);
} else {
LOG(ERROR) << "Could not map pools";
@@ -522,7 +522,7 @@
LOG(ERROR) << "Error converting a non-4-D tensor to NHWC layout";
return false;
}
- to.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ to.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
if (data_layout) {
// convert dimensions
Shape inShape = from.shape();
@@ -628,7 +628,7 @@
// that are inputs to an operation.
static void freeUnusedSubgraphOperands(std::vector<RunTimeOperandInfo>* operands) {
for (auto& info : *operands) {
- if (info.lifetime == OperandLifeTime::TEMPORARY_VARIABLE && info.numberOfUsesLeft == 0 &&
+ if (info.lifetime == Operand::LifeTime::TEMPORARY_VARIABLE && info.numberOfUsesLeft == 0 &&
info.buffer != nullptr) {
delete[] info.buffer;
info.buffer = nullptr;
@@ -642,8 +642,8 @@
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos) {
NNTRACE_CPU(NNTRACE_PHASE_EXECUTION, "run");
- VLOG(CPUEXE) << "CpuExecutor::run() with request(" << SHOW_IF_DEBUG(toString(request)) << ")";
- mModelOperandValues = &model.operandValues;
+ VLOG(CPUEXE) << "CpuExecutor::run() with request(" << SHOW_IF_DEBUG(request) << ")";
+ mModelOperandValues = model.operandValues.data();
mModelPoolInfos = &modelPoolInfos;
mReferencedSubgraphs = &model.referenced;
@@ -680,8 +680,8 @@
return result;
}
-int CpuExecutor::executeSubgraph(const Subgraph& subgraph, RunTimeOperandInfo* operands) {
- VLOG(CPUEXE) << "CpuExecutor::executeSubgraph " << toString(subgraph);
+int CpuExecutor::executeSubgraph(const Model::Subgraph& subgraph, RunTimeOperandInfo* operands) {
+ VLOG(CPUEXE) << "CpuExecutor::executeSubgraph " << subgraph;
// The graph has serialized the operation in execution order.
for (const auto& operation : subgraph.operations) {
NN_RETURN_IF_ERROR(executeOperation(operation, operands));
@@ -689,10 +689,12 @@
return ANEURALNETWORKS_NO_ERROR;
}
-std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(const Subgraph& subgraph) {
+std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(
+ const Model::Subgraph& subgraph) {
VLOG(CPUEXE) << "CpuExecutor::initializeRunTimeInfo";
const size_t count = subgraph.operands.size();
std::vector<RunTimeOperandInfo> operands(count);
+ std::vector<uint32_t> numberOfConsumers = countNumberOfConsumers(count, subgraph.operations);
for (size_t i = 0; i < count; i++) {
const Operand& from = subgraph.operands[i];
RunTimeOperandInfo& to = operands[i];
@@ -704,15 +706,15 @@
to.lifetime = from.lifetime;
to.extraParams = from.extraParams;
switch (from.lifetime) {
- case OperandLifeTime::TEMPORARY_VARIABLE:
+ case Operand::LifeTime::TEMPORARY_VARIABLE:
to.buffer = nullptr;
- to.numberOfUsesLeft = from.numberOfConsumers;
+ to.numberOfUsesLeft = numberOfConsumers[i];
break;
- case OperandLifeTime::CONSTANT_COPY:
- to.buffer = const_cast<uint8_t*>(&(*mModelOperandValues)[from.location.offset]);
+ case Operand::LifeTime::CONSTANT_COPY:
+ to.buffer = const_cast<uint8_t*>(mModelOperandValues + from.location.offset);
to.numberOfUsesLeft = 0;
break;
- case OperandLifeTime::CONSTANT_REFERENCE: {
+ case Operand::LifeTime::CONSTANT_REFERENCE: {
auto poolIndex = from.location.poolIndex;
CHECK_LT(poolIndex, mModelPoolInfos->size());
auto& r = (*mModelPoolInfos)[poolIndex];
@@ -720,16 +722,21 @@
to.numberOfUsesLeft = 0;
break;
}
- case OperandLifeTime::SUBGRAPH: {
+ case Operand::LifeTime::SUBGRAPH: {
auto subgraphIndex = from.location.offset;
CHECK_LT(subgraphIndex, mReferencedSubgraphs->size());
to.buffer = reinterpret_cast<uint8_t*>(
- const_cast<Subgraph*>(&(*mReferencedSubgraphs)[subgraphIndex]));
+ const_cast<Model::Subgraph*>(&(*mReferencedSubgraphs)[subgraphIndex]));
to.numberOfUsesLeft = 0;
} break;
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
- case OperandLifeTime::NO_VALUE:
+ case Operand::LifeTime::POINTER: {
+ to.buffer = reinterpret_cast<uint8_t*>(
+ const_cast<void*>(std::get<const void*>(from.location.pointer)));
+ to.numberOfUsesLeft = 0;
+ } break;
+ case Operand::LifeTime::SUBGRAPH_INPUT:
+ case Operand::LifeTime::SUBGRAPH_OUTPUT:
+ case Operand::LifeTime::NO_VALUE:
to.buffer = nullptr;
to.numberOfUsesLeft = 0;
break;
@@ -739,15 +746,15 @@
}
void CpuExecutor::updateForArguments(const std::vector<uint32_t>& indexes,
- const hal::hidl_vec<hal::RequestArgument>& arguments,
+ const std::vector<Request::Argument>& arguments,
const std::vector<RunTimePoolInfo>& requestPoolInfos,
RunTimeOperandInfo* operands) {
CHECK_EQ(indexes.size(), arguments.size());
for (size_t i = 0; i < indexes.size(); i++) {
const uint32_t operandIndex = indexes[i];
- const RequestArgument& from = arguments[i];
+ const Request::Argument& from = arguments[i];
RunTimeOperandInfo& to = operands[operandIndex];
- if (from.dimensions.size() > 0) {
+ if (!from.dimensions.empty()) {
// It's the responsibility of the caller to validate that
// from.dimensions only modifies the dimensions that were
// unspecified in the model. That's the case in SampleDriver.cpp
@@ -755,8 +762,8 @@
// TODO make sure that's the case for the default CPU path.
to.dimensions = from.dimensions;
}
- if (from.hasNoValue) {
- to.lifetime = OperandLifeTime::NO_VALUE;
+ if (from.lifetime == Request::Argument::LifeTime::NO_VALUE) {
+ to.lifetime = Operand::LifeTime::NO_VALUE;
CHECK(to.buffer == nullptr);
to.length = 0;
} else {
@@ -793,9 +800,9 @@
return result;
}
- // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << toString(operation) << ")";
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << operation << ")";
+ const std::vector<uint32_t>& ins = operation.inputs;
+ const std::vector<uint32_t>& outs = operation.outputs;
bool success = false;
int result = ANEURALNETWORKS_NO_ERROR;
@@ -807,29 +814,30 @@
auto allParametersPresent = [&operation, &operands, &ins, &outs](size_t requiredIns,
size_t requiredOuts) -> bool {
auto verify = [&operation, &operands](size_t requiredCount,
- const hidl_vec<uint32_t>& indexes,
+ const std::vector<uint32_t>& indexes,
const char* type) -> bool {
size_t actualCount = indexes.size();
if (actualCount != requiredCount) {
- LOG(ERROR) << getOperationName(operation.type) << ": Invalid number of " << type
- << " operands. Got " << actualCount << " of " << requiredCount;
+ LOG(ERROR) << operation.type << ": Invalid number of " << type << " operands. Got "
+ << actualCount << " of " << requiredCount;
return false;
}
for (size_t i = 0; i < actualCount; i++) {
- if (operands[indexes[i]].lifetime == OperandLifeTime::NO_VALUE) {
- LOG(ERROR) << getOperationName(operation.type) << " " << type << " operand "
- << i << " is required but missing.";
+ if (operands[indexes[i]].lifetime == Operand::LifeTime::NO_VALUE) {
+ LOG(ERROR) << operation.type << " " << type << " operand " << i
+ << " is required but missing.";
return false;
}
}
return true;
};
- auto verifyNoZeroSizedInputs = [&operation, &operands](const hidl_vec<uint32_t>& indexes) {
+ auto verifyNoZeroSizedInputs = [&operation,
+ &operands](const std::vector<uint32_t>& indexes) {
for (size_t i = 0; i < indexes.size(); i++) {
for (size_t j = 0; j < operands[indexes[i]].dimensions.size(); j++) {
if (operands[indexes[i]].dimensions[j] == 0) {
- LOG(ERROR) << getOperationName(operation.type)
+ LOG(ERROR) << operation.type
<< " does not support zero-sized tensor, but input " << i
<< " dimension " << j << " is zero.";
return false;
@@ -882,7 +890,7 @@
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
if (!depthToSpacePrepare(input_tmp.shape(), blockSize, &outShape) ||
@@ -946,7 +954,7 @@
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1167,7 +1175,7 @@
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1239,7 +1247,7 @@
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1558,7 +1566,7 @@
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1605,7 +1613,8 @@
success = groupedConvQuant8PerChannel(
reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
reinterpret_cast<const int8_t*>(filter.buffer), filter.shape(),
- filter.extraParams.channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(filter.extraParams)
+ .scales.data(),
reinterpret_cast<const int32_t*>(bias.buffer), bias.shape(),
padding_left, padding_right, padding_top, padding_bottom, stride_width,
stride_height, numGroups, activation,
@@ -1624,7 +1633,8 @@
success = groupedConvQuant8PerChannel(
reinterpret_cast<const int8_t*>(input_tmp.buffer), input_tmp.shape(),
reinterpret_cast<const int8_t*>(filter.buffer), filter.shape(),
- filter.extraParams.channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(filter.extraParams)
+ .scales.data(),
reinterpret_cast<const int32_t*>(bias.buffer), bias.shape(),
padding_left, padding_right, padding_top, padding_bottom, stride_width,
stride_height, numGroups, activation,
@@ -1703,11 +1713,10 @@
const OperationRegistration* operationRegistration =
mOperationResolver->findOperation(operation.type);
if (operationRegistration == nullptr) {
- LOG(ERROR) << getOperationName(operation.type) << " not registered";
+ LOG(ERROR) << operation.type << " not registered";
} else if (operationRegistration->prepare == nullptr ||
operationRegistration->execute == nullptr) {
- LOG(ERROR) << "Incomplete operation registration: "
- << getOperationName(operation.type);
+ LOG(ERROR) << "Incomplete operation registration: " << operation.type;
} else {
OperationExecutionContext context(&operation, operands);
success = operationRegistration->flags.allowOmittedOperand ||
@@ -1724,7 +1733,7 @@
result = ANEURALNETWORKS_OP_FAILED;
}
if (result != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << getOperationName(operation.type) << " failed.";
+ LOG(ERROR) << operation.type << " failed.";
}
consumeOperationInputs(ins, operands);
@@ -1753,7 +1762,8 @@
const uint32_t branchInputIndex = condValue ? op::kThenModelOperand : op::kElseModelOperand;
const RunTimeOperandInfo& branchOperand = operands[operation.inputs[branchInputIndex]];
- const Subgraph& branchSubgraph = *reinterpret_cast<const Subgraph*>(branchOperand.buffer);
+ const Model::Subgraph& branchSubgraph =
+ *reinterpret_cast<const Model::Subgraph*>(branchOperand.buffer);
std::vector<RunTimeOperandInfo> branchOperands = initializeRunTimeInfo(branchSubgraph);
// Initialize inner input and output operands from outer operands.
@@ -1783,8 +1793,10 @@
namespace op = operation_while;
const RunTimeOperandInfo& condModelOperand = operands[operation.inputs[op::kCondModelOperand]];
const RunTimeOperandInfo& bodyModelOperand = operands[operation.inputs[op::kBodyModelOperand]];
- const Subgraph& condSubgraph = *reinterpret_cast<const Subgraph*>(condModelOperand.buffer);
- const Subgraph& bodySubgraph = *reinterpret_cast<const Subgraph*>(bodyModelOperand.buffer);
+ const Model::Subgraph& condSubgraph =
+ *reinterpret_cast<const Model::Subgraph*>(condModelOperand.buffer);
+ const Model::Subgraph& bodySubgraph =
+ *reinterpret_cast<const Model::Subgraph*>(bodyModelOperand.buffer);
std::vector<RunTimeOperandInfo> condOperands = initializeRunTimeInfo(condSubgraph);
std::vector<RunTimeOperandInfo> bodyOperands = initializeRunTimeInfo(bodySubgraph);
@@ -1916,7 +1928,7 @@
mOutputShapes[i].dimensions = from.dimensions;
mOutputShapes[i].isSufficient = from.isSufficient();
VLOG(EXECUTION) << "CpuExecutor::setOutputShapes: mOutputShapes[" << i
- << "] = " << toString(mOutputShapes[i]);
+ << "] = " << mOutputShapes[i];
}
}
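In the CpuExecutor hunks above, initializeRunTimeInfo() no longer reads
from.numberOfConsumers (the canonical Operand drops that field) and
instead derives per-operand consumer counts via countNumberOfConsumers().
A sketch of the computation such a helper presumably performs, with a
simplified Operation type assumed:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Operation {
        std::vector<uint32_t> inputs;  // indices into the operand table
    };

    // One pass over the operations: each appearance of an operand index
    // in an operation's input list counts as one consumer.
    std::vector<uint32_t> countConsumers(size_t numOperands,
                                         const std::vector<Operation>& operations) {
        std::vector<uint32_t> counts(numOperands, 0);
        for (const auto& operation : operations) {
            for (uint32_t input : operation.inputs) {
                ++counts[input];
            }
        }
        return counts;
    }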
diff --git a/common/ExecutionBurstController.cpp b/common/ExecutionBurstController.cpp
index e195e7d..bb1c08f 100644
--- a/common/ExecutionBurstController.cpp
+++ b/common/ExecutionBurstController.cpp
@@ -36,8 +36,6 @@
namespace android::nn {
namespace {
-using namespace hal;
-
using V1_2::FmqRequestDatum;
using V1_2::FmqResultDatum;
using V1_2::IBurstCallback;
@@ -45,10 +43,10 @@
using FmqRequestDescriptor = hardware::MQDescriptorSync<FmqRequestDatum>;
using FmqResultDescriptor = hardware::MQDescriptorSync<FmqResultDatum>;
-constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
- std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming12 = {std::numeric_limits<uint64_t>::max(),
+ std::numeric_limits<uint64_t>::max()};
-class BurstContextDeathHandler : public hidl_death_recipient {
+class BurstContextDeathHandler : public hardware::hidl_death_recipient {
public:
using Callback = std::function<void()>;
@@ -68,7 +66,7 @@
} // anonymous namespace
// serialize a request into a packet
-std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, MeasureTiming measure,
+std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, V1_2::MeasureTiming measure,
const std::vector<int32_t>& slots) {
// count how many elements need to be sent for a request
size_t count = 2 + request.inputs.size() + request.outputs.size() + request.pools.size();
@@ -149,11 +147,11 @@
}
// deserialize a packet into the result
-std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>> deserialize(
- const std::vector<FmqResultDatum>& data) {
+std::optional<std::tuple<V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, V1_2::Timing>>
+deserialize(const std::vector<FmqResultDatum>& data) {
using discriminator = FmqResultDatum::hidl_discriminator;
- std::vector<OutputShape> outputShapes;
+ std::vector<V1_2::OutputShape> outputShapes;
size_t index = 0;
// validate packet information
@@ -218,7 +216,7 @@
}
// unpackage execution timing
- const Timing timing = data[index].executionTiming();
+ const V1_2::Timing timing = data[index].executionTiming();
index++;
// validate packet information
@@ -254,7 +252,7 @@
std::chrono::microseconds pollingTimeWindow)
: mFmqResultChannel(std::move(fmqResultChannel)), kPollingTimeWindow(pollingTimeWindow) {}
-std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>>
+std::optional<std::tuple<V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, V1_2::Timing>>
ResultChannelReceiver::getBlocking() {
const auto packet = getPacketBlocking();
if (!packet) {
@@ -275,7 +273,8 @@
// TODO: look for a different/better way to signal/notify the futex to
// wake up any thread waiting on it
FmqResultDatum datum;
- datum.packetInformation({/*.packetSize=*/0, /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE,
+ datum.packetInformation({/*.packetSize=*/0,
+ /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE,
/*.numberOfOperands=*/0});
mFmqResultChannel->writeBlocking(&datum, 1);
}
@@ -363,7 +362,7 @@
RequestChannelSender::RequestChannelSender(std::unique_ptr<FmqRequestChannel> fmqRequestChannel)
: mFmqRequestChannel(std::move(fmqRequestChannel)) {}
-bool RequestChannelSender::send(const V1_0::Request& request, MeasureTiming measure,
+bool RequestChannelSender::send(const V1_0::Request& request, V1_2::MeasureTiming measure,
const std::vector<int32_t>& slots) {
const std::vector<FmqRequestDatum> serialized = serialize(request, measure, slots);
return sendPacket(serialized);
@@ -389,30 +388,31 @@
mValid = false;
}
-Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
- const hidl_vec<int32_t>& slots, getMemories_cb cb) {
+hardware::Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
+ const hardware::hidl_vec<int32_t>& slots, getMemories_cb cb) {
std::lock_guard<std::mutex> guard(mMutex);
// get all memories
- hidl_vec<hidl_memory> memories(slots.size());
+ hardware::hidl_vec<hardware::hidl_memory> memories(slots.size());
std::transform(slots.begin(), slots.end(), memories.begin(), [this](int32_t slot) {
- return slot < mMemoryCache.size() ? mMemoryCache[slot] : hidl_memory{};
+ return slot < mMemoryCache.size() ? mMemoryCache[slot] : hardware::hidl_memory{};
});
// ensure all memories are valid
if (!std::all_of(memories.begin(), memories.end(),
- [](const hidl_memory& memory) { return memory.valid(); })) {
+ [](const hardware::hidl_memory& memory) { return memory.valid(); })) {
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
// return successful
cb(V1_0::ErrorStatus::NONE, std::move(memories));
- return Void();
+ return hardware::Void();
}
std::vector<int32_t> ExecutionBurstController::ExecutionBurstCallback::getSlots(
- const hidl_vec<hidl_memory>& memories, const std::vector<intptr_t>& keys) {
+ const hardware::hidl_vec<hardware::hidl_memory>& memories,
+ const std::vector<intptr_t>& keys) {
std::lock_guard<std::mutex> guard(mMutex);
// retrieve (or bind) all slots corresponding to memories
@@ -439,8 +439,8 @@
return {true, slot};
}
-int32_t ExecutionBurstController::ExecutionBurstCallback::getSlotLocked(const hidl_memory& memory,
- intptr_t key) {
+int32_t ExecutionBurstController::ExecutionBurstCallback::getSlotLocked(
+ const hardware::hidl_memory& memory, intptr_t key) {
auto iter = mMemoryIdToSlot.find(key);
if (iter == mMemoryIdToSlot.end()) {
const int32_t slot = allocateSlotLocked();
@@ -503,7 +503,7 @@
// configure burst
V1_0::ErrorStatus errorStatus;
sp<IBurstContext> burstContext;
- const Return<void> ret = preparedModel->configureExecutionBurst(
+ const hardware::Return<void> ret = preparedModel->configureExecutionBurst(
callback, *requestChannelDescriptor, *resultChannelDescriptor,
[&errorStatus, &burstContext](V1_0::ErrorStatus status,
const sp<IBurstContext>& context) {
@@ -539,7 +539,7 @@
// proactively handle service crashes. If the linkToDeath call fails,
// asynchronous calls are susceptible to hangs if the service crashes before
// providing the response.
- const Return<bool> deathHandlerRet = burstContext->linkToDeath(deathHandler, 0);
+ const hardware::Return<bool> deathHandlerRet = burstContext->linkToDeath(deathHandler, 0);
if (!deathHandlerRet.isOk() || deathHandlerRet != true) {
LOG(ERROR) << "ExecutionBurstController::create -- Failed to register a death recipient "
"for the IBurstContext object.";
@@ -555,7 +555,7 @@
const std::shared_ptr<RequestChannelSender>& requestChannelSender,
const std::shared_ptr<ResultChannelReceiver>& resultChannelReceiver,
const sp<IBurstContext>& burstContext, const sp<ExecutionBurstCallback>& callback,
- const sp<hidl_death_recipient>& deathHandler)
+ const sp<hardware::hidl_death_recipient>& deathHandler)
: mRequestChannelSender(requestChannelSender),
mResultChannelReceiver(resultChannelReceiver),
mBurstContext(burstContext),
@@ -572,17 +572,17 @@
}
}
-static std::tuple<int, std::vector<OutputShape>, Timing, bool> getExecutionResult(
- V1_0::ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing,
+static std::tuple<int, std::vector<V1_2::OutputShape>, V1_2::Timing, bool> getExecutionResult(
+ V1_0::ErrorStatus status, std::vector<V1_2::OutputShape> outputShapes, V1_2::Timing timing,
bool fallback) {
auto [n, checkedOutputShapes, checkedTiming] =
getExecutionResult(convertToV1_3(status), std::move(outputShapes), timing);
- return {n, std::move(checkedOutputShapes), checkedTiming, fallback};
+ return {n, convertToV1_2(checkedOutputShapes), convertToV1_2(checkedTiming), fallback};
}
-std::tuple<int, std::vector<OutputShape>, Timing, bool> ExecutionBurstController::compute(
- const V1_0::Request& request, MeasureTiming measure,
- const std::vector<intptr_t>& memoryIds) {
+std::tuple<int, std::vector<V1_2::OutputShape>, V1_2::Timing, bool>
+ExecutionBurstController::compute(const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const std::vector<intptr_t>& memoryIds) {
// This is the first point when we know an execution is occurring, so begin
// to collect systraces. Note that the first point we can begin collecting
// systraces in ExecutionBurstServer is when the RequestChannelReceiver
@@ -598,7 +598,7 @@
if (!success) {
LOG(ERROR) << "Error sending FMQ packet";
// only use fallback execution path if the packet could not be sent
- return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming,
+ return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12,
/*fallback=*/true);
}
@@ -607,7 +607,7 @@
if (!result) {
LOG(ERROR) << "Error retrieving FMQ packet";
// only use fallback execution path if the packet could not be sent
- return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming,
+ return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12,
/*fallback=*/false);
}
diff --git a/common/ExecutionBurstServer.cpp b/common/ExecutionBurstServer.cpp
index 487cd9f..67d4ccb 100644
--- a/common/ExecutionBurstServer.cpp
+++ b/common/ExecutionBurstServer.cpp
@@ -35,16 +35,14 @@
namespace android::nn {
namespace {
-using namespace hal;
-
using hardware::MQDescriptorSync;
using V1_2::FmqRequestDatum;
using V1_2::FmqResultDatum;
using V1_2::IBurstCallback;
using V1_2::IBurstContext;
-constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
- std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
+ std::numeric_limits<uint64_t>::max()};
// DefaultBurstExecutorWithCache adapts an IPreparedModel so that it can be
// used as an IBurstExecutorWithCache. Specifically, the cache simply stores the
@@ -61,17 +59,17 @@
return (it != mMemoryCache.end()) && it->second.valid();
}
- void addCacheEntry(const hidl_memory& memory, int32_t slot) override {
+ void addCacheEntry(const hardware::hidl_memory& memory, int32_t slot) override {
mMemoryCache[slot] = memory;
}
void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); }
- std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
+ std::tuple<V1_0::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> execute(
const V1_0::Request& request, const std::vector<int32_t>& slots,
- MeasureTiming measure) override {
+ V1_2::MeasureTiming measure) override {
// convert slots to pools
- hidl_vec<hidl_memory> pools(slots.size());
+ hardware::hidl_vec<hardware::hidl_memory> pools(slots.size());
std::transform(slots.begin(), slots.end(), pools.begin(),
[this](int32_t slot) { return mMemoryCache[slot]; });
@@ -81,18 +79,20 @@
// setup execution
V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- hidl_vec<OutputShape> returnedOutputShapes;
- Timing returnedTiming;
+ hardware::hidl_vec<V1_2::OutputShape> returnedOutputShapes;
+ V1_2::Timing returnedTiming;
auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](
- V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
+ V1_0::ErrorStatus status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
returnedStatus = status;
returnedOutputShapes = outputShapes;
returnedTiming = timing;
};
// execute
- const Return<void> ret = mpPreparedModel->executeSynchronously(fullRequest, measure, cb);
+ const hardware::Return<void> ret =
+ mpPreparedModel->executeSynchronously(fullRequest, measure, cb);
if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE) {
LOG(ERROR) << "IPreparedModelAdapter::execute -- Error executing";
return {returnedStatus, std::move(returnedOutputShapes), kNoTiming};
@@ -103,14 +103,15 @@
private:
V1_2::IPreparedModel* const mpPreparedModel;
- std::map<int32_t, hidl_memory> mMemoryCache;
+ std::map<int32_t, hardware::hidl_memory> mMemoryCache;
};
} // anonymous namespace
// serialize result
std::vector<FmqResultDatum> serialize(V1_0::ErrorStatus errorStatus,
- const std::vector<OutputShape>& outputShapes, Timing timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
// count how many elements need to be sent for a request
size_t count = 2 + outputShapes.size();
for (const auto& outputShape : outputShapes) {
@@ -161,7 +162,7 @@
}
// deserialize request
-std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> deserialize(
+std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, V1_2::MeasureTiming>> deserialize(
const std::vector<FmqRequestDatum>& data) {
using discriminator = FmqRequestDatum::hidl_discriminator;
@@ -188,7 +189,7 @@
}
// unpackage input operands
- std::vector<RequestArgument> inputs;
+ std::vector<V1_0::RequestArgument> inputs;
inputs.reserve(numberOfInputOperands);
for (size_t operand = 0; operand < numberOfInputOperands; ++operand) {
// validate input operand information
@@ -202,7 +203,7 @@
data[index].inputOperandInformation();
index++;
const bool hasNoValue = operandInfo.hasNoValue;
- const DataLocation location = operandInfo.location;
+ const V1_0::DataLocation location = operandInfo.location;
const uint32_t numberOfDimensions = operandInfo.numberOfDimensions;
// unpackage operand dimensions
@@ -229,7 +230,7 @@
}
// unpackage output operands
- std::vector<RequestArgument> outputs;
+ std::vector<V1_0::RequestArgument> outputs;
outputs.reserve(numberOfOutputOperands);
for (size_t operand = 0; operand < numberOfOutputOperands; ++operand) {
// validate output operand information
@@ -243,7 +244,7 @@
data[index].outputOperandInformation();
index++;
const bool hasNoValue = operandInfo.hasNoValue;
- const DataLocation location = operandInfo.location;
+ const V1_0::DataLocation location = operandInfo.location;
const uint32_t numberOfDimensions = operandInfo.numberOfDimensions;
// unpackage operand dimensions
@@ -294,7 +295,7 @@
}
// unpackage measureTiming
- const MeasureTiming measure = data[index].measureTiming();
+ const V1_2::MeasureTiming measure = data[index].measureTiming();
index++;
// validate packet information
@@ -333,7 +334,7 @@
std::chrono::microseconds pollingTimeWindow)
: mFmqRequestChannel(std::move(fmqRequestChannel)), kPollingTimeWindow(pollingTimeWindow) {}
-std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>>
+std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, V1_2::MeasureTiming>>
RequestChannelReceiver::getBlocking() {
const auto packet = getPacketBlocking();
if (!packet) {
@@ -463,7 +464,8 @@
: mFmqResultChannel(std::move(fmqResultChannel)) {}
bool ResultChannelSender::send(V1_0::ErrorStatus errorStatus,
- const std::vector<OutputShape>& outputShapes, Timing timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
const std::vector<FmqResultDatum> serialized = serialize(errorStatus, outputShapes, timing);
return sendPacket(serialized);
}
@@ -555,10 +557,10 @@
mWorker.join();
}
-Return<void> ExecutionBurstServer::freeMemory(int32_t slot) {
+hardware::Return<void> ExecutionBurstServer::freeMemory(int32_t slot) {
std::lock_guard<std::mutex> hold(mMutex);
mExecutorWithCache->removeCacheEntry(slot);
- return Void();
+ return hardware::Void();
}
void ExecutionBurstServer::ensureCacheEntriesArePresentLocked(const std::vector<int32_t>& slots) {
@@ -580,14 +582,15 @@
}
V1_0::ErrorStatus errorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- std::vector<hidl_memory> returnedMemories;
- auto cb = [&errorStatus, &returnedMemories](V1_0::ErrorStatus status,
- const hidl_vec<hidl_memory>& memories) {
+ std::vector<hardware::hidl_memory> returnedMemories;
+ auto cb = [&errorStatus, &returnedMemories](
+ V1_0::ErrorStatus status,
+ const hardware::hidl_vec<hardware::hidl_memory>& memories) {
errorStatus = status;
returnedMemories = memories;
};
- const Return<void> ret = mCallback->getMemories(unknownSlots, cb);
+ const hardware::Return<void> ret = mCallback->getMemories(unknownSlots, cb);
if (!ret.isOk() || errorStatus != V1_0::ErrorStatus::NONE ||
returnedMemories.size() != unknownSlots.size()) {
diff --git a/common/GraphDump.cpp b/common/GraphDump.cpp
index 3c208cd..146e1c6 100644
--- a/common/GraphDump.cpp
+++ b/common/GraphDump.cpp
@@ -18,9 +18,8 @@
#include "GraphDump.h"
-#include "HalInterfaces.h"
-
#include <android-base/logging.h>
+
#include <algorithm>
#include <iostream>
#include <map>
@@ -28,11 +27,11 @@
#include <string>
#include <utility>
+#include "Utils.h"
+
namespace android {
namespace nn {
-using namespace hal;
-
// class Dumper is a wrapper around an std::ostream (if instantiated
// with a pointer to a stream) or around LOG(INFO) (otherwise).
//
@@ -112,25 +111,40 @@
return "OEM";
case OperandType::TENSOR_OEM_BYTE:
return "TOEMB";
- default:
- return toString(type);
+ default: {
+ std::ostringstream oss;
+ oss << type;
+ return oss.str();
+ }
}
}
// If the specified Operand of the specified Model has OperandType
// nnType corresponding to C++ type cppType and is of
-// OperandLifeTime::CONSTANT_COPY, then write the Operand's value to
+// Operand::LifeTime::CONSTANT_COPY, then write the Operand's value to
// the Dumper.
namespace {
template <OperandType nnType, typename cppType>
void tryValueDump(Dumper& dump, const Model& model, const Operand& opnd) {
- if (opnd.type != nnType || opnd.lifetime != OperandLifeTime::CONSTANT_COPY ||
- opnd.location.length != sizeof(cppType)) {
+ if (opnd.type != nnType) {
+ return;
+ }
+
+ const void* pointer = nullptr;
+ if (opnd.lifetime == Operand::LifeTime::CONSTANT_COPY) {
+ pointer = model.operandValues.data() + opnd.location.offset;
+ } else if (opnd.lifetime == Operand::LifeTime::POINTER) {
+ pointer = std::get<const void*>(opnd.location.pointer);
+ } else {
+ return;
+ }
+
+ if (opnd.location.length != sizeof(cppType)) {
return;
}
cppType val;
- memcpy(&val, &model.operandValues[opnd.location.offset], sizeof(cppType));
+ memcpy(&val, pointer, sizeof(cppType));
dump << " = " << val;
}
} // namespace
@@ -172,25 +186,28 @@
const char* kind = nullptr;
const char* io = nullptr;
switch (opnd.lifetime) {
- case OperandLifeTime::CONSTANT_COPY:
+ case Operand::LifeTime::CONSTANT_COPY:
kind = "COPY";
break;
- case OperandLifeTime::CONSTANT_REFERENCE:
+ case Operand::LifeTime::CONSTANT_REFERENCE:
kind = "REF";
break;
- case OperandLifeTime::SUBGRAPH_INPUT:
+ case Operand::LifeTime::SUBGRAPH_INPUT:
io = "input";
break;
- case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case Operand::LifeTime::SUBGRAPH_OUTPUT:
io = "output";
break;
- case OperandLifeTime::NO_VALUE:
+ case Operand::LifeTime::NO_VALUE:
kind = "NO";
break;
- case OperandLifeTime::SUBGRAPH:
+ case Operand::LifeTime::SUBGRAPH:
kind = "SUBGRAPH";
break;
- default:
+ case Operand::LifeTime::POINTER:
+ kind = "POINTER";
+ break;
+ case Operand::LifeTime::TEMPORARY_VARIABLE:
// nothing interesting
break;
}
@@ -205,7 +222,7 @@
tryValueDump<OperandType::FLOAT32, float>(dump, model, opnd);
tryValueDump<OperandType::INT32, int>(dump, model, opnd);
tryValueDump<OperandType::UINT32, unsigned>(dump, model, opnd);
- if (opnd.dimensions.size()) {
+ if (!opnd.dimensions.empty()) {
dump << "(";
for (unsigned i = 0, e = opnd.dimensions.size(); i < e; i++) {
if (i > 0) {
@@ -230,7 +247,7 @@
dump << " ordering=out";
}
}
- dump << " label=\"" << i << ": " << toString(operation.type) << "\"]" << Dumper::endl;
+ dump << " label=\"" << i << ": " << operation.type << "\"]" << Dumper::endl;
{
// operation inputs
for (unsigned in = 0, inE = operation.inputs.size(); in < inE; in++) {
diff --git a/common/MetaModel.cpp b/common/MetaModel.cpp
index 30d88a1..81d1282 100644
--- a/common/MetaModel.cpp
+++ b/common/MetaModel.cpp
@@ -24,6 +24,7 @@
#include <sstream>
#include <type_traits>
#include <utility>
+#include <vector>
#include "GraphDump.h"
#include "HalInterfaces.h"
@@ -31,14 +32,12 @@
namespace android::nn {
-using namespace hal;
-
namespace {
// Add an element to the end of the vector and return a pair consisting of the
// index of the new element and a pointer to the new element.
template <class T>
-std::pair<uint32_t, T*> extend(hidl_vec<T>* vec) {
+std::pair<uint32_t, T*> extend(hardware::hidl_vec<T>* vec) {
size_t nextIndex = vec->size();
vec->resize(nextIndex + 1);
return {nextIndex, &(*vec)[nextIndex]};
@@ -48,14 +47,14 @@
// return a pair consisting of the index of the new element and a pointer to the
// new element.
template <class T>
-std::pair<uint32_t, T*> extend(hidl_vec<T>* vec, const T& val) {
+std::pair<uint32_t, T*> extend(hardware::hidl_vec<T>* vec, const T& val) {
auto extended = extend(vec);
*extended.second = val;
return extended;
}
template <typename T>
-bool operator<(const hidl_vec<T>& a, const hidl_vec<T>& b) {
+bool operator<(const hardware::hidl_vec<T>& a, const hardware::hidl_vec<T>& b) {
return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
}
@@ -63,19 +62,19 @@
template <class T_Model>
struct ModelVersion;
template <>
-struct ModelVersion<hal::V1_0::Model> {
+struct ModelVersion<V1_0::Model> {
static constexpr char name[] = "V1_0";
};
template <>
-struct ModelVersion<hal::V1_1::Model> {
+struct ModelVersion<V1_1::Model> {
static constexpr char name[] = "V1_1";
};
template <>
-struct ModelVersion<hal::V1_2::Model> {
+struct ModelVersion<V1_2::Model> {
static constexpr char name[] = "V1_2";
};
template <>
-struct ModelVersion<hal::V1_3::Model> {
+struct ModelVersion<V1_3::Model> {
static constexpr char name[] = "V1_3";
};
@@ -84,16 +83,16 @@
template <typename T_ReturnType>
T_ReturnType uncheckedConvertTo(OperationType type);
template <>
-hal::V1_0::OperationType uncheckedConvertTo<hal::V1_0::OperationType>(OperationType type) {
- return uncheckedConvertToV1_0(type);
+V1_0::OperationType uncheckedConvertTo<V1_0::OperationType>(OperationType type) {
+ return uncheckedConvertToV1_0(convertToV1_3(type));
}
template <>
-hal::V1_1::OperationType uncheckedConvertTo<hal::V1_1::OperationType>(OperationType type) {
- return uncheckedConvertToV1_1(type);
+V1_1::OperationType uncheckedConvertTo<V1_1::OperationType>(OperationType type) {
+ return uncheckedConvertToV1_1(convertToV1_3(type));
}
template <>
-hal::V1_2::OperationType uncheckedConvertTo<hal::V1_2::OperationType>(OperationType type) {
- return uncheckedConvertToV1_2(type);
+V1_2::OperationType uncheckedConvertTo<V1_2::OperationType>(OperationType type) {
+ return uncheckedConvertToV1_2(convertToV1_3(type));
}
// Dispatcher mechanism for calling an appropriate convertToV1_* given the
@@ -101,45 +100,41 @@
template <typename T_ReturnType>
T_ReturnType convertTo(Operand operand);
template <>
-hal::V1_0::Operand convertTo<hal::V1_0::Operand>(Operand operand) {
- return convertToV1_0(operand);
+V1_0::Operand convertTo<V1_0::Operand>(Operand operand) {
+ return convertToV1_0(convertToV1_3(operand));
}
template <>
-hal::V1_2::Operand convertTo<hal::V1_2::Operand>(Operand operand) {
- return convertToV1_2(operand);
+V1_2::Operand convertTo<V1_2::Operand>(Operand operand) {
+ return convertToV1_2(convertToV1_3(operand));
}
// Dispatcher mechanism for calling an appropriate convertToV1_* given the
-// desired return type. Note that there are no V1_[12]::OperandLifeTime types.
+// desired return type. Note that there are no V1_[12]::Operand::LifeTime types.
template <typename T_ReturnType>
-T_ReturnType convertTo(OperandLifeTime lifetime);
+T_ReturnType convertTo(V1_3::OperandLifeTime lifetime);
template <>
-hal::V1_0::OperandLifeTime convertTo<hal::V1_0::OperandLifeTime>(OperandLifeTime lifetime) {
+V1_0::OperandLifeTime convertTo<V1_0::OperandLifeTime>(V1_3::OperandLifeTime lifetime) {
return convertToV1_0(lifetime);
}
-template <>
-hal::V1_3::OperandLifeTime convertTo<hal::V1_3::OperandLifeTime>(OperandLifeTime lifetime) {
- return lifetime;
-}
// Dispatcher mechanism for calling an appropriate compliantWithV1_* given the
// desired target model type.
template <typename T_SlicedModel>
-void getNoncompliantOperations(const hal::V1_3::Model& model,
+void getNoncompliantOperations(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations);
template <>
-void getNoncompliantOperations<hal::V1_0::Model>(const hal::V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
+void getNoncompliantOperations<V1_0::Model>(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_0(model, noncompliantOperations);
}
template <>
-void getNoncompliantOperations<hal::V1_1::Model>(const hal::V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
+void getNoncompliantOperations<V1_1::Model>(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_1(model, noncompliantOperations);
}
template <>
-void getNoncompliantOperations<hal::V1_2::Model>(const hal::V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
+void getNoncompliantOperations<V1_2::Model>(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_2(model, noncompliantOperations);
}
@@ -191,18 +186,25 @@
return slice->mSlicedOperationIndexToOrigIndex.at(slicedOperationIndex);
})));
}
-template MetaModel::ReturnedSlice<hal::V1_0::Model> MetaModel::getSlice(
- Slice<hal::V1_0::Model>* slice) const;
-template MetaModel::ReturnedSlice<hal::V1_1::Model> MetaModel::getSlice(
- Slice<hal::V1_1::Model>* slice) const;
-template MetaModel::ReturnedSlice<hal::V1_2::Model> MetaModel::getSlice(
- Slice<hal::V1_2::Model>* slice) const;
-// When adding HAL version 1.4, make sure to handle control flow and referenced
-// subgraphs here properly. A V1_3 sliced model should contain an IF/WHILE and
-// its referenced subgraphs only if there are no V1_4+ operations in those
-// subgraphs.
-// template MetaModel::ReturnedSlice<hal::V1_3::Model> MetaModel::getSlice(
-// Slice<hal::V1_3::Model>* slice) const;
+template MetaModel::ReturnedSlice<V1_0::Model> MetaModel::getSlice(Slice<V1_0::Model>* slice) const;
+template MetaModel::ReturnedSlice<V1_1::Model> MetaModel::getSlice(Slice<V1_1::Model>* slice) const;
+template MetaModel::ReturnedSlice<V1_2::Model> MetaModel::getSlice(Slice<V1_2::Model>* slice) const;
+template <>
+MetaModel::ReturnedSlice<V1_3::Model> MetaModel::getSlice(Slice<V1_3::Model>* slice) const {
+ CHECK(slice != nullptr);
+ if (slice->mState == SliceState::UNINITIALIZED) {
+ // When adding HAL version 1.4, make sure to handle control flow and referenced
+ // subgraphs here properly. A V1_3 sliced model should contain an IF/WHILE and
+ // its referenced subgraphs only if there are no V1_4+ operations in those
+ // subgraphs.
+ *slice = {
+ .mState = SliceState::NORMAL,
+ .mHidlModel = convertToV1_3(mModel),
+ };
+ }
+ Mapper trivialMapper = [](uint32_t i) { return i; };
+ return std::make_pair(slice->mHidlModel, trivialMapper);
+}
// Utility class for makeSlice().
//
@@ -234,8 +236,8 @@
template <typename T_SlicedOperand>
class MetaModel::OrigOperandToSlicedInputOperandIndex {
public:
- OrigOperandToSlicedInputOperandIndex(hidl_vec<T_SlicedOperand>* slicedOperands,
- hidl_vec<uint32_t>* slicedInputIndexes)
+ OrigOperandToSlicedInputOperandIndex(hardware::hidl_vec<T_SlicedOperand>* slicedOperands,
+ hardware::hidl_vec<uint32_t>* slicedInputIndexes)
: mSlicedOperands(*slicedOperands), mSlicedInputIndexes(*slicedInputIndexes) {}
// Given an operand from the original model, return the index of the
@@ -246,21 +248,19 @@
auto it = mMap.find(operand);
if (it != mMap.end()) {
VLOG(COMPILATION) << "OrigOperandToSlicedInputOperandIndex::getIndex looked for "
- << toString(operand) << " and found " << it->second << ": "
- << toString(it->first);
+ << operand << " and found " << it->second << ": " << it->first;
return it->second;
}
// Create
- operand.numberOfConsumers = 0;
- operand.lifetime = convertTo<decltype(operand.lifetime)>(OperandLifeTime::SUBGRAPH_INPUT);
+ operand.lifetime = Operand::LifeTime::SUBGRAPH_INPUT;
operand.location = {};
uint32_t slicedOperandIndex =
extend(&mSlicedOperands, convertTo<T_SlicedOperand>(operand)).first;
mMap[operand] = slicedOperandIndex;
extend(&mSlicedInputIndexes, slicedOperandIndex);
VLOG(COMPILATION) << "OrigOperandToSlicedInputOperandIndex::getIndex created "
- << slicedOperandIndex << ": " << toString(operand);
+ << slicedOperandIndex << ": " << operand;
return slicedOperandIndex;
}
@@ -284,38 +284,36 @@
}
private:
- static bool compare(const SymmPerChannelQuantParams& a,
- const SymmPerChannelQuantParams& b) {
+ static bool compare(const Operand::SymmPerChannelQuantParams& a,
+ const Operand::SymmPerChannelQuantParams& b) {
if (a.scales != b.scales) {
return a.scales < b.scales;
}
return a.channelDim < b.channelDim;
}
- static bool compare(const OperandExtraParams& a, const OperandExtraParams& b) {
- if (a.getDiscriminator() != b.getDiscriminator()) {
- return a.getDiscriminator() < b.getDiscriminator();
+ static bool compare(const Operand::ExtraParams& a, const Operand::ExtraParams& b) {
+ if (a.index() != b.index()) {
+ return a.index() < b.index();
}
-
- switch (a.getDiscriminator()) {
- case OperandExtraParams::hidl_discriminator::channelQuant:
- return compare(a.channelQuant(), b.channelQuant());
-
- case OperandExtraParams::hidl_discriminator::extension:
- return a.extension() < b.extension();
-
- case OperandExtraParams::hidl_discriminator::none:
- return false;
-
- default:
- CHECK(false) << "Unexpected";
- return false;
+ if (std::holds_alternative<Operand::SymmPerChannelQuantParams>(a)) {
+ return compare(std::get<Operand::SymmPerChannelQuantParams>(a),
+ std::get<Operand::SymmPerChannelQuantParams>(b));
}
+ if (std::holds_alternative<Operand::ExtensionParams>(a)) {
+ return compare(std::get<Operand::ExtensionParams>(a),
+ std::get<Operand::ExtensionParams>(b));
+ }
+ if (std::holds_alternative<Operand::NoParams>(a)) {
+ return false;
+ }
+ CHECK(false) << "Unexpected";
+ return false;
}
};
std::map<Operand, uint32_t, Compare> mMap;
- hidl_vec<T_SlicedOperand>& mSlicedOperands;
- hidl_vec<uint32_t>& mSlicedInputIndexes;
+ hardware::hidl_vec<T_SlicedOperand>& mSlicedOperands;
+ hardware::hidl_vec<uint32_t>& mSlicedInputIndexes;
};
template <class T_SlicedModel>
@@ -329,11 +327,14 @@
using SlicedOperation = typename Slice<T_SlicedModel>::Operation;
using SlicedOperationType = typename Slice<T_SlicedModel>::OperationType;
- const auto& origOperands = mHidlModel.main.operands;
- const auto& origOperations = mHidlModel.main.operations;
+ const auto& origOperands = mModel.main.operands;
+ const auto& origOperations = mModel.main.operations;
auto& slicedOperands = slice->mHidlModel.operands;
auto& slicedOperations = slice->mHidlModel.operations;
+ std::vector<uint32_t> origOperandNumberOfConsumers =
+ countNumberOfConsumers(origOperands.size(), origOperations);
+
for (uint32_t origOperationIndex = 0; origOperationIndex < origOperations.size();
++origOperationIndex) {
const Operation& origOperation = origOperations[origOperationIndex];
@@ -401,9 +402,9 @@
slicedOperation.outputs[outputNum] = slicedOperandIndex;
const auto subgraphOutputLifetime = convertTo<decltype(slicedOperand.lifetime)>(
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ V1_3::OperandLifeTime::SUBGRAPH_OUTPUT);
if (!inputOperandIndexesOfCompliantOperations.count(origOperandIndex) &&
- origOperand.numberOfConsumers) {
+ origOperandNumberOfConsumers[origOperandIndex] != 0) {
// Was consumed only by noncompliant operations; convert to
// an output of the sliced model.
slicedOperand.lifetime = subgraphOutputLifetime;
@@ -427,24 +428,24 @@
Slice<T_SlicedModel> slice;
- const auto& origOperands = mHidlModel.main.operands;
- const auto& origOperations = mHidlModel.main.operations;
+ const auto& origOperands = mModel.main.operands;
+ const auto& origOperations = mModel.main.operations;
auto& slicedOperands = slice.mHidlModel.operands;
// Indexes of elements of noncompliant origOperations
std::set<uint32_t> noncompliantOperations;
- getNoncompliantOperations<T_SlicedModel>(mHidlModel, &noncompliantOperations);
+ getNoncompliantOperations<T_SlicedModel>(convertToV1_3(mModel), &noncompliantOperations);
// Map from an operand index in origOperands to the corresponding operand index in
// slicedOperands
std::map<uint32_t, uint32_t> origOperandIndexToSlicedIndex;
// Collect the operand indexes of every operand that is an input to a
- // compliant operation. If the operand is a CONSTANT_* or a NO_VALUE, copy
- // it to the sliced model and update origOperandIndexToSlicedIndex
- // accordingly. Otherwise, we'll deal with the operand in the subsequent
- // "Main loop", where we process operation outputs (intermediates and model
- // outputs).
+ // compliant operation. If the operand is a CONSTANT_*, POINTER, or a
+ // NO_VALUE, copy it to the sliced model and update
+ // origOperandIndexToSlicedIndex accordingly. Otherwise, we'll deal with
+ // the operand in the subsequent "Main loop", where we process operation
+ // outputs (intermediates and model outputs).
std::set<uint32_t> inputOperandIndexesOfCompliantOperations;
for (uint32_t origOperationIndex = 0; origOperationIndex < origOperations.size();
++origOperationIndex) {
@@ -455,9 +456,10 @@
if (inputOperandIndexesOfCompliantOperations.insert(input).second) {
const Operand& origOperand = origOperands[input];
switch (origOperand.lifetime) {
- case OperandLifeTime::CONSTANT_COPY:
- case OperandLifeTime::CONSTANT_REFERENCE:
- case OperandLifeTime::NO_VALUE: {
+ case Operand::LifeTime::CONSTANT_COPY:
+ case Operand::LifeTime::CONSTANT_REFERENCE:
+ case Operand::LifeTime::POINTER:
+ case Operand::LifeTime::NO_VALUE: {
const uint32_t slicedOperandIndex =
extend(&slicedOperands, convertTo<SlicedOperand>(origOperand))
.first;
@@ -482,7 +484,7 @@
// only if it is consumed by at least one compliant operation. Note that in
// the sliced model we share all model inputs of the same "type"; and that
// we may later add model inputs to the sliced model.
- for (uint32_t origInputIndex : mHidlModel.main.inputIndexes) {
+ for (uint32_t origInputIndex : mModel.main.inputIndexes) {
if (inputOperandIndexesOfCompliantOperations.count(origInputIndex)) {
const uint32_t slicedIndex =
origOperandToSlicedInputOperandIndex.getIndex(origOperands[origInputIndex]);
@@ -502,19 +504,19 @@
// This would be more complex and probably take more computation time, but
// it would reduce the size of the sliced model, and hence the time spent
// copying it around and passing it across the HAL interface.
- slice.mHidlModel.operandValues = mHidlModel.operandValues;
- slice.mHidlModel.pools = mHidlModel.pools;
+ slice.mHidlModel.operandValues = convertToV1_0(mModel.operandValues);
+ slice.mHidlModel.pools = convertToV1_0(mModel.pools);
if (VLOG_IS_ON(COMPILATION)) {
{
std::ostringstream fromName;
- fromName << "Slice: From " << ModelVersion<decltype(mHidlModel)>::name;
- graphDump(fromName.str().c_str(), mHidlModel);
+ fromName << "Slice: From canonical";
+ graphDump(fromName.str().c_str(), mModel);
}
{
std::ostringstream toName;
toName << "Slice: To " << ModelVersion<decltype(slice.mHidlModel)>::name;
- graphDump(toName.str().c_str(), convertToV1_3(slice.mHidlModel));
+ graphDump(toName.str().c_str(), uncheckedConvert(convertToV1_3(slice.mHidlModel)));
}
}
diff --git a/common/OperationResolver.cpp b/common/OperationResolver.cpp
index fce3af4..e6792b2 100644
--- a/common/OperationResolver.cpp
+++ b/common/OperationResolver.cpp
@@ -23,8 +23,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
// TODO(b/119608412): Find a way to not reference every operation here.
const OperationRegistration* register_ABS();
const OperationRegistration* register_ADD();
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index f0bcb0e..9f2af22 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -32,8 +32,6 @@
namespace {
-using namespace hal;
-
bool validateOperandTypes(const std::vector<OperandType>& expectedTypes, const char* tag,
uint32_t operandCount,
std::function<OperandType(uint32_t)> getOperandType) {
@@ -41,8 +39,8 @@
for (uint32_t i = 0; i < operandCount; ++i) {
OperandType type = getOperandType(i);
NN_RET_CHECK(type == expectedTypes[i])
- << "Invalid " << tag << " tensor type " << toString(type) << " for " << tag << " "
- << i << ", expected " << toString(expectedTypes[i]);
+ << "Invalid " << tag << " tensor type " << type << " for " << tag << " " << i
+ << ", expected " << expectedTypes[i];
}
return true;
}
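Editorial note: this hunk and many below drop toString() in favor of streaming values directly; that relies on nnapi/TypeUtils.h (included elsewhere in this change) providing operator<< overloads for the canonical types — an assumption of this note, not something shown in the diff. Illustrative use:

    #include <sstream>
    #include <string>

    std::string describe(OperandType a, OperandType b) {
        std::ostringstream oss;
        oss << a << " vs " << b;  // was: toString(a) + " vs " + toString(b)
        return oss.str();
    }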
@@ -97,17 +95,17 @@
if (i != 0) {
message << ", ";
}
- message << toString(context->getInputType(i));
+ message << context->getInputType(i);
}
message << "} and outputs {";
for (uint32_t i = 0, n = context->getNumOutputs(); i < n; ++i) {
if (i != 0) {
message << ", ";
}
- message << toString(context->getOutputType(i));
+ message << context->getOutputType(i);
}
- message << "} is only supported since " << toString(minSupportedHalVersion)
- << " (validating using " << toString(context->getHalVersion()) << ")";
+ message << "} is only supported since " << minSupportedHalVersion << " (validating using "
+ << context->getHalVersion() << ")";
NN_RET_CHECK_FAIL() << message.str();
}
return true;
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 1c41e59..398da55 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -22,6 +22,10 @@
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <errno.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.3/Conversions.h>
#include <poll.h>
#include <algorithm>
@@ -42,13 +46,12 @@
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"
#include "ValidateHal.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-using namespace hal;
-
-constexpr PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
@@ -98,21 +101,26 @@
}
}
-Deadline makeDeadline(uint64_t duration) {
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) {
+ // According to the standard, std::chrono::nanoseconds::rep is a signed
+ // integer type of at least 64 bits. This check prevents an overflow when
+ // rep is exactly 64 bits.
+ if constexpr (sizeof(std::chrono::nanoseconds::rep) == sizeof(int64_t)) {
+ nanoseconds = std::min(nanoseconds,
+ static_cast<uint64_t>(std::chrono::nanoseconds::max().count()));
+ }
+ return std::chrono::nanoseconds{nanoseconds};
+}
+
+Deadline makeDeadline(TimeoutDuration duration) {
const auto maxTime = Deadline::max();
const auto currentTime = std::chrono::steady_clock::now();
- // Create Deadline. If there would be an overflow, use the max value.
- const uint64_t remainingNanoseconds =
- std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime - currentTime).count();
- if (duration > remainingNanoseconds) {
+ // If there would be an overflow, use the max value.
+ if (duration > maxTime - currentTime) {
return maxTime;
}
- return currentTime + std::chrono::nanoseconds{duration};
-}
-
-std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
- return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+ return currentTime + duration;
}
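Editorial note: a hypothetical caller of the two helpers above, showing the saturating behavior end to end:

    #include <limits>

    void deadlineExample() {
        // A budget too large to represent clamps instead of wrapping around.
        const uint64_t budgetNs = std::numeric_limits<uint64_t>::max();
        const TimeoutDuration duration = makeTimeoutDuration(budgetNs);  // clamped
        const Deadline deadline = makeDeadline(duration);  // == Deadline::max()
        (void)deadline;
    }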
static uint64_t getMaxNanosecondsSinceEpoch() {
@@ -121,8 +129,8 @@
return maxTime.time_since_epoch().count();
}
-std::optional<Deadline> makeDeadline(const OptionalTimePoint& timePoint) {
- using Discriminator = hal::OptionalTimePoint::hidl_discriminator;
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint) {
+ using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator;
if (timePoint.getDiscriminator() == Discriminator::none) {
return std::nullopt;
}
@@ -146,12 +154,7 @@
}
static OptionalTimePoint makeTimePoint(const Deadline& deadline) {
- const auto timeSinceEpoch = deadline.time_since_epoch();
- const uint64_t nanosecondsSinceEpoch =
- std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
- OptionalTimePoint ret;
- ret.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
- return ret;
+ return deadline;
}
OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) {
@@ -159,18 +162,18 @@
}
static bool isExtensionOperandType(int32_t type) {
- return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX);
+ return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}
static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
- return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX);
+ return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}
-bool isExtensionOperandType(OperandType type) {
+bool isExtensionOperandType(V1_3::OperandType type) {
return isExtensionOperandType(static_cast<int32_t>(type));
}
-bool isExtensionOperationType(OperationType type) {
+bool isExtensionOperationType(V1_3::OperationType type) {
return isExtensionOperationType(static_cast<int32_t>(type));
}
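Editorial note: the shift test works because of how extension types are encoded. A sketch assuming kExtensionTypeBits == 16, i.e. a type is (extensionPrefix << 16) | typeWithinExtension:

    #include <cstdint>

    constexpr uint32_t kBits = 16;  // assumed value of kExtensionTypeBits
    constexpr uint32_t kTypeMask = (1u << kBits) - 1;

    uint32_t extensionPrefix(uint32_t type) { return type >> kBits; }
    uint32_t typeWithinExtension(uint32_t type) { return type & kTypeMask; }
    // A type is an extension type iff its prefix is nonzero, which is exactly
    // what (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0 tests.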
@@ -211,7 +214,7 @@
uint32_t getNumInputs() const override;
OperandType getInputType(uint32_t index) const override;
Shape getInputShape(uint32_t index) const override;
- const OperandExtraParams getInputExtraParams(uint32_t index) const override;
+ const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;
uint32_t getNumOutputs() const override;
OperandType getOutputType(uint32_t index) const override;
@@ -266,7 +269,7 @@
operand->extraParams};
}
-const OperandExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
+const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
return getInputOperand(index)->extraParams;
}
@@ -284,15 +287,11 @@
#define COUNT(X) (sizeof(X) / sizeof(X[0]))
-std::string getOperandTypeName(OperandType type) {
+std::string getOperandTypeName(V1_3::OperandType type) {
return toString(type);
}
-static std::string getOperationName(uint32_t code) {
- return getOperationName(static_cast<OperationType>(code));
-}
-
-std::string getOperationName(OperationType type) {
+std::string getOperationName(V1_3::OperationType type) {
return toString(type);
}
@@ -360,12 +359,14 @@
}
uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
- CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
- int n = static_cast<int>(type);
- uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
- return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
- ? sizeOfElement
- : sizeOfTensorData(sizeOfElement, dimensions);
+ const size_t size = getNonExtensionSize(type, dimensions).value();
+ CHECK_LE(size, std::numeric_limits<uint32_t>::max());
+ return size;
+}
+
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions);
}
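Editorial note: hypothetical use of the rewritten helper, assuming the canonical getNonExtensionSize returns the operand's size in bytes:

    #include <cassert>
    #include <vector>

    void sizeExample() {
        const std::vector<uint32_t> dims = {2, 3, 4};
        // 2 * 3 * 4 elements at 4 bytes per TENSOR_FLOAT32 element.
        const uint32_t bytes =
                nonExtensionOperandSizeOfData(OperandType::TENSOR_FLOAT32, dims);
        assert(bytes == 96);  // the CHECK_LE above guarantees it fits in uint32_t
    }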
// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t.
@@ -389,9 +390,9 @@
return size;
}
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
const std::vector<uint32_t>& dimensions) {
- CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
+ CHECK(!isExtension(type)) << "Size of extension operand data is unknown";
int n = static_cast<int>(type);
uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
@@ -399,6 +400,11 @@
: sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
}
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions);
+}
+
bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
const std::vector<uint32_t>& dimensions) {
return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
@@ -417,11 +423,21 @@
dimensions.size());
}
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
+ dimensions.size());
+}
+
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}
bool tensorHasUnspecifiedDimensions(const Operand& operand) {
+ return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions);
+}
+
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) {
return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
operand.dimensions.size());
}
@@ -490,10 +506,15 @@
LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
}
+void logModelToInfo(const Model& model) {
+ LOG(INFO) << "Model start";
+ logModelToInfo(convertToV1_3(model));
+}
+
bool validateOperandSymmPerChannelQuantParams(
- const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
- const char* tag) {
- if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+ const V1_3::Operand& halOperand,
+ const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) {
+ if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
return false;
}
@@ -663,17 +684,15 @@
}
for (uint32_t i = 0; i < inOperandCount; i++) {
if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
- LOG(ERROR) << "Invalid input tensor type "
- << toString(operands[inOperandIndexes[i]].type) << " for input " << i
- << ", expected " << toString(inExpectedTypes[i]);
+ LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type
+ << " for input " << i << ", expected " << inExpectedTypes[i];
return ANEURALNETWORKS_BAD_DATA;
}
}
for (uint32_t i = 0; i < outOperandCount; i++) {
if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
- LOG(ERROR) << "Invalid output tensor type "
- << toString(operands[outOperandIndexes[i]].type) << " for input " << i
- << ", expected " << toString(outExpectedInTypes[i]);
+ LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type
+ << " for input " << i << ", expected " << outExpectedInTypes[i];
return ANEURALNETWORKS_BAD_DATA;
}
}
@@ -684,9 +703,9 @@
static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
HalVersion minSupportedHalVersion) {
if (halVersion < minSupportedHalVersion) {
- LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
- << " are only supported in " << toString(minSupportedHalVersion)
- << " and later (validating using " << toString(halVersion) << ")";
+ LOG(ERROR) << "The given inputs and outputs for operation " << opType
+ << " are only supported in " << minSupportedHalVersion
+ << " and later (validating using " << halVersion << ")";
return ANEURALNETWORKS_BAD_DATA;
}
return ANEURALNETWORKS_NO_ERROR;
@@ -695,7 +714,7 @@
// Checks if two operands have the same types, ranks (if specified), dimensions
// (if specified), scales, zeroPoints, and extraParams.
static bool compatible(const Operand& a, const Operand& b) {
- NN_RET_CHECK(a.type == b.type) << toString(a.type) << " != " << toString(b.type);
+ NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type;
if (a.dimensions.size() != 0 && b.dimensions.size() != 0) {
NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions";
for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) {
@@ -706,14 +725,13 @@
}
NN_RET_CHECK_EQ(a.scale, b.scale);
NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
- NN_RET_CHECK(a.extraParams == b.extraParams)
- << toString(a.extraParams) << " != " << toString(b.extraParams);
+ NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams;
return true;
}
static bool validateConditionOperand(const Operand& operand) {
NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
- << "Unexpected condition operand type: " << toString(operand.type);
+ << "Unexpected condition operand type: " << operand.type;
NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
return true;
@@ -764,8 +782,7 @@
static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper,
const Operand& operand) {
- if (!helper.allowControlFlowOperationWithOperandOfUnknownSize &&
- !isExtensionOperandType(operand.type)) {
+ if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) {
NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u);
}
return true;
@@ -847,8 +864,7 @@
static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
const uint32_t* inputIndexes, uint32_t outputCount,
const uint32_t* outputIndexes,
- const std::vector<hal::Operand>& operands,
- HalVersion halVersion) {
+ const std::vector<Operand>& operands, HalVersion halVersion) {
if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
LOG(ERROR) << "This validateOperation() overload does not support control flow";
@@ -873,7 +889,7 @@
if (halVersion < HalVersion::V1_2) {
LOG(ERROR)
<< "Extension operations are supported since HAL version 1.2, validating using "
- << toString(halVersion);
+ << halVersion;
return ANEURALNETWORKS_BAD_DATA;
}
// There is no other validation we can do for an extension operation.
@@ -883,7 +899,7 @@
auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
<< ") or output operands (" << outputCount << ", expected " << expOut
- << ") for operation " << getOperationName(opType);
+ << ") for operation " << opType;
};
switch (opType) {
@@ -916,14 +932,12 @@
OperandType::TENSOR_INT32};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -934,7 +948,7 @@
if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 3 or 2) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -957,8 +971,7 @@
inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 3) {
@@ -975,7 +988,7 @@
if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 3 or 2) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -998,8 +1011,7 @@
inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 3) {
@@ -1023,8 +1035,7 @@
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM &&
inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
@@ -1051,8 +1062,7 @@
if (inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
@@ -1074,8 +1084,7 @@
inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto hashType = operands[inputIndexes[0]].type;
@@ -1097,8 +1106,7 @@
OperandType::INT32,
};
} else {
- LOG(ERROR) << "Unsupported hash tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported hash tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
@@ -1117,7 +1125,7 @@
outputCount != kNumOutputsMergedWithState)) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 61) or output operands (" << outputCount
- << ", expected 1, 2, 5 or 6) for operation " << getOperationName(opType);
+ << ", expected 1, 2, 5 or 6) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1125,8 +1133,7 @@
auto inputType = operands[inputIndexes[0]].type;
if (inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_FLOAT16) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1162,7 +1169,7 @@
if ((inputCount != 23 && inputCount != 27) || outputCount != 4) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 23 or 27) or output operands (" << outputCount
- << ", expected 4) for operation " << getOperationName(opType);
+ << ", expected 4) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes;
@@ -1170,8 +1177,7 @@
auto inputType = operands[inputIndexes[0]].type;
if (inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_FLOAT16) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1239,8 +1245,7 @@
OperandType::TENSOR_INT32,
};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
@@ -1279,8 +1284,7 @@
OperandType::TENSOR_FLOAT16,
};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1299,8 +1303,7 @@
} else if (inputType == OperandType::TENSOR_FLOAT16) {
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {
@@ -1316,7 +1319,7 @@
if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 3 or 2) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1349,8 +1352,7 @@
};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 3) {
@@ -1367,7 +1369,7 @@
if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 4 or 3) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1407,8 +1409,7 @@
};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 4) {
@@ -1462,14 +1463,12 @@
};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1514,14 +1513,12 @@
}; // TODO(b/116699425): Make it UINT8.
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1559,7 +1556,7 @@
outExpectedTypes = {inputType}; // Only identity CAST is supported.
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
} else {
- LOG(ERROR) << "Unsupported data type for operation " << getOperationName(opType);
+ LOG(ERROR) << "Unsupported data type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
// Validate that output shape is equal to input shape if dimensions
@@ -1586,8 +1583,7 @@
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1600,8 +1596,7 @@
} else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
@@ -1628,8 +1623,7 @@
inExpectedTypes = {inputType, OperandType::INT32};
outExpectedTypes = {OperandType::TENSOR_INT32};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -1653,8 +1647,7 @@
inExpectedTypes = {inputType, OperandType::INT32};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1669,7 +1662,7 @@
case ANEURALNETWORKS_SPLIT: {
if (inputCount != 3) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)"
- << getOperationName(opType);
+                           << " for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1678,8 +1671,7 @@
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM &&
inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1711,8 +1703,7 @@
inExpectedTypes = {inputType, inputType};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1728,7 +1719,7 @@
if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 12 or 9) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1751,15 +1742,16 @@
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
if (filterType != inputType &&
filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- LOG(ERROR) << "Unsupported filter tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported filter tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
- operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) {
+ std::get<Operand::SymmPerChannelQuantParams>(
+ operands[inputIndexes[1]].extraParams)
+ .channelDim != 0) {
LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
- << getOperationName(opType);
+ << opType;
return ANEURALNETWORKS_BAD_DATA;
}
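Editorial note: the std::get call above is guarded by the filterType check; unguarded access to the wrong alternative throws std::bad_variant_access. A sketch of the safe-access pattern on its own:

    #include <variant>

    void readChannelDim(const Operand& operand) {
        if (std::holds_alternative<Operand::SymmPerChannelQuantParams>(operand.extraParams)) {
            const auto& channelQuant =
                    std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams);
            (void)channelQuant.channelDim;  // meaningful only for per-channel operands
        }
    }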
@@ -1769,8 +1761,7 @@
OperandType::INT32, OperandType::INT32};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1805,8 +1796,7 @@
inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1831,8 +1821,7 @@
inExpectedTypes = {inputType, inputType};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1864,7 +1853,7 @@
static_cast<OperationType>(opType));
if (operationRegistration == nullptr) {
if (0 <= opType && opType < kNumberOfOperationTypes) {
- LOG(ERROR) << getOperationName(opType) << " not registered";
+ LOG(ERROR) << opType << " not registered";
} else {
LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
<< kNumberOfOperationTypes << ")";
@@ -1872,14 +1861,14 @@
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
if (operationRegistration->validate == nullptr) {
- LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
+ LOG(ERROR) << "Incomplete operation registration: " << opType;
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
OperationValidationContext context(operationRegistration->name, inputCount,
inputIndexes, outputCount, outputIndexes,
operands.data(), halVersion);
if (!operationRegistration->validate(&context)) {
- LOG(ERROR) << "Validation failed for operation " << getOperationName(opType);
+ LOG(ERROR) << "Validation failed for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return ANEURALNETWORKS_NO_ERROR;
@@ -1943,12 +1932,28 @@
return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
+ case ErrorStatus::DEAD_OBJECT:
+ return ANEURALNETWORKS_DEAD_OBJECT;
}
- LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
- << " mapped to ANEURALNETWORKS_OP_FAILED";
+ LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
return ANEURALNETWORKS_OP_FAILED;
}
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
+ return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
+}
+
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
+ return convertErrorStatusToResultCode(uncheckedConvert(status));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
+}
+
std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
@@ -1966,42 +1971,22 @@
return {n, std::move(outputShapes), timing};
}
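Editorial note: the new overloads above follow the bridging pattern this change applies throughout — one canonical implementation, plus thin HAL-typed shims that only convert arguments. In miniature (doWork is illustrative, not a real function):

    int doWork(ErrorStatus status);  // canonical implementation

    int doWork(V1_3::ErrorStatus status) {  // HAL shim: convert, then delegate
        return doWork(uncheckedConvert(status));
    }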
-std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
- const std::vector<uint32_t>& rhs) {
- if (rhs.empty()) return lhs;
- if (lhs.empty()) return rhs;
- if (lhs.size() != rhs.size()) {
- LOG(ERROR) << "Incompatible ranks: " << toString(lhs) << " and " << toString(rhs);
- return std::nullopt;
- }
- std::vector<uint32_t> combined = lhs;
- for (uint32_t i = 0; i < lhs.size(); i++) {
- if (lhs[i] == 0) {
- combined[i] = rhs[i];
- } else if (rhs[i] != 0 && lhs[i] != rhs[i]) {
- LOG(ERROR) << "Incompatible dimensions: " << toString(lhs) << " and " << toString(rhs);
- return std::nullopt;
- }
- }
- return combined;
-}
-
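Editorial note: the deleted combineDimensions presumably now lives with the canonical utilities; for reference, its behavior as removed here, with 0 acting as "unknown dimension":

    #include <optional>
    #include <vector>

    std::optional<std::vector<uint32_t>> combineDims(const std::vector<uint32_t>& lhs,
                                                     const std::vector<uint32_t>& rhs) {
        if (rhs.empty()) return lhs;
        if (lhs.empty()) return rhs;
        if (lhs.size() != rhs.size()) return std::nullopt;  // incompatible ranks
        std::vector<uint32_t> combined = lhs;
        for (uint32_t i = 0; i < lhs.size(); i++) {
            if (lhs[i] == 0) {
                combined[i] = rhs[i];  // take the known side
            } else if (rhs[i] != 0 && lhs[i] != rhs[i]) {
                return std::nullopt;  // both known but different
            }
        }
        return combined;
    }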
// Capabilities::operandPerformance utilities.
// The field Capabilities::operandPerformance is a vector sorted by the field
// Capabilities::OperandPerformance::type.
template <HalVersion version>
-hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
- PerformanceInfo perf) {
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ V1_0::PerformanceInfo perf) {
using OpPerf = VersionedOperandPerformance<version>;
// Note: range presents enumerators in declaration order, not in numerical order.
- static constexpr hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
+ static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
std::vector<OpPerf> ret;
ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
for (VersionedOperandType<version> type : kOperandTypeRange) {
- if (static_cast<OperandType>(type) != OperandType::SUBGRAPH) {
+ if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
ret.push_back(OpPerf{type, perf});
}
}
@@ -2011,14 +1996,14 @@
return ret;
}
-template hal::hidl_vec<V1_2::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_2>(PerformanceInfo perf);
-template hal::hidl_vec<V1_3::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_3>(PerformanceInfo perf);
+template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
+template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
template <HalVersion version>
-void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
- VersionedOperandType<version> type, hal::PerformanceInfo perf) {
+void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
+ VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
CHECK(operandPerformance != nullptr);
const auto it =
std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
@@ -2029,23 +2014,24 @@
it->info = perf;
}
-void update(hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
- V1_2::OperandType type, PerformanceInfo perf) {
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, V1_0::PerformanceInfo perf) {
update<HalVersion::V1_2>(operandPerformance, type, perf);
}
-void update(hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
- V1_3::OperandType type, PerformanceInfo perf) {
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, V1_0::PerformanceInfo perf) {
update<HalVersion::V1_3>(operandPerformance, type, perf);
}
template <HalVersion version>
-PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
- VersionedOperandType<version> type) {
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
+ VersionedOperandType<version> type) {
const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
[](const VersionedOperandPerformance<version>& perf,
VersionedOperandType<version> type) {
- return static_cast<OperandType>(perf.type) <
- static_cast<OperandType>(type);
+ return static_cast<V1_3::OperandType>(perf.type) <
+ static_cast<V1_3::OperandType>(type);
});
if (it == operandPerformance.end()) {
LOG(WARNING) << "No PerformanceInfo for " << toString(type);
@@ -2055,12 +2041,14 @@
}
}
-PerformanceInfo lookup(const hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
- V1_2::OperandType type) {
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type) {
return lookup<HalVersion::V1_2>(operandPerformance, type);
}
-PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
- V1_3::OperandType type) {
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type) {
CHECK(type != V1_3::OperandType::SUBGRAPH)
<< "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
return lookup<HalVersion::V1_3>(operandPerformance, type);
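Editorial note: a minimal sketch of the lookup pattern above — binary-search a vector kept sorted by type and fall back to a default when the type is absent (the real code logs a warning and returns kNoPerformanceInfo):

    #include <algorithm>
    #include <vector>

    template <typename Perf, typename Type>
    V1_0::PerformanceInfo lookupSorted(const std::vector<Perf>& sorted, Type type,
                                       V1_0::PerformanceInfo fallback) {
        const auto it = std::lower_bound(
                sorted.begin(), sorted.end(), type,
                [](const Perf& perf, Type t) { return perf.type < t; });
        return (it != sorted.end() && it->type == type) ? it->info : fallback;
    }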
@@ -2070,16 +2058,16 @@
// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
// This array must be in sorted order.
-static const OperandType kQuantized8PerformanceConsistentWithP[] = {
- OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM,
- OperandType::TENSOR_OEM_BYTE};
+static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
+ V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
+ V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
- const PerformanceInfo quantized8Performance =
+ const V1_0::PerformanceInfo quantized8Performance =
lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
std::end(kQuantized8PerformanceConsistentWithP),
- [quantized8Performance, &capabilities](OperandType type) {
+ [quantized8Performance, &capabilities](V1_3::OperandType type) {
return quantized8Performance ==
lookup(capabilities.operandPerformance,
static_cast<V1_2::OperandType>(type));
@@ -2087,26 +2075,26 @@
}
static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
- const PerformanceInfo quantized8Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
+ const V1_0::PerformanceInfo quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
std::end(kQuantized8PerformanceConsistentWithP),
- [quantized8Performance, &capabilities](OperandType type) {
+ [quantized8Performance, &capabilities](V1_3::OperandType type) {
return quantized8Performance ==
lookup(capabilities.operandPerformance, type);
});
}
-static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
- PerformanceInfo quantized8Performance) {
- hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
+static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
+ hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
std::size(kQuantized8PerformanceConsistentWithP));
- std::transform(
- std::begin(kQuantized8PerformanceConsistentWithP),
- std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
- [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
- return {static_cast<V1_2::OperandType>(type), quantized8Performance};
- });
+ std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
+ std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
+ [quantized8Performance](
+ V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
+ return {static_cast<V1_2::OperandType>(type), quantized8Performance};
+ });
return ret;
}
@@ -2119,9 +2107,9 @@
}
bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
- const PerformanceInfo perfTensorFloat32 =
+ const V1_0::PerformanceInfo perfTensorFloat32 =
lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
- const PerformanceInfo perfFloat32 =
+ const V1_0::PerformanceInfo perfFloat32 =
lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
if (perfTensorFloat32 != perfFloat32 ||
perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
@@ -2133,10 +2121,10 @@
}
bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
- const PerformanceInfo perfTensorFloat32 =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
- const PerformanceInfo perfFloat32 =
- lookup(capabilities.operandPerformance, OperandType::FLOAT32);
+ const V1_0::PerformanceInfo perfTensorFloat32 =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
+ const V1_0::PerformanceInfo perfFloat32 =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
if (perfTensorFloat32 != perfFloat32 ||
perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
@@ -2168,8 +2156,8 @@
bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
- (lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
- lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
+ (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
+ lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
return false;
}
@@ -2323,9 +2311,9 @@
<< " from V1_3::Capabilities to V1_0::Capabilities";
}
return {.float32Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
- .quantized8Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance = lookup(capabilities.operandPerformance,
+ V1_3::OperandType::TENSOR_QUANT8_ASYMM)};
}
V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
@@ -2357,9 +2345,9 @@
<< " from V1_3::Capabilities to V1_1::Capabilities";
}
return {.float32Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
.quantized8Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM),
.relaxedFloat32toFloat16Performance =
capabilities.relaxedFloat32toFloat16PerformanceTensor};
}
@@ -2415,7 +2403,7 @@
capabilities.relaxedFloat32toFloat16PerformanceTensor,
};
const auto& inputOpPerf = capabilities.operandPerformance;
- hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
+ hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
opPerfSupported.resize(inputOpPerf.size());
auto last =
std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
@@ -2477,17 +2465,18 @@
.outputs = operation.outputs};
}
-static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hidl_vec<V1_1::Operation>& operations) {
- hidl_vec<V1_0::Operation> result(operations.size());
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
return result;
}
-static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
- hidl_vec<V1_1::Operation> result(operations.size());
+static hardware::hidl_vec<V1_1::Operation> convertToV1_1(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_0::Operation& operation) { return convertToV1_1(operation); });
return result;
@@ -2513,13 +2502,15 @@
std::set<uint32_t>* noncompliantOperations) {
// A boolean vector indicating whether each pool is compliant with the target HAL version.
std::vector<bool> isPoolCompliant(model.pools.size(), false);
- std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
- [version](const hidl_memory& pool) { return validatePool(pool, version); });
+ std::transform(
+ model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
+ [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); });
// A boolean vector indicating whether each operand is compliant with the target HAL version.
std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
std::transform(model.main.operands.begin(), model.main.operands.end(),
- isOperandCompliant.begin(), [&isPoolCompliant, version](const Operand& op) {
+ isOperandCompliant.begin(),
+ [&isPoolCompliant, version](const V1_3::Operand& op) {
bool is_operand_compliant = false;
switch (version) {
case HalVersion::UNKNOWN:
@@ -2541,22 +2532,24 @@
break;
}
return is_operand_compliant &&
- !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
+ !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE &&
!isPoolCompliant[op.location.poolIndex]);
});
- auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) {
+ auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec<uint32_t>& indices) {
return std::all_of(
indices.begin(), indices.end(),
[&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
};
- auto localValidateOperation = [&model, version, &allOperandsCompliant](const Operation& op) {
+ auto localValidateOperation = [&model, version,
+ &allOperandsCompliant](const V1_3::Operation& op) {
if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
- int error = validateOperation(
- static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.main.operands, version);
+ int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+ uncheckedConvert(model.main.operands), version);
return error == ANEURALNETWORKS_NO_ERROR;
};
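Editorial note: the compliance walk above reduces to three stages; a schematic with hypothetical helpers (checkPools, checkOperands, and operationCompliant stand in for the inline std::transform/lambda stages):

    #include <algorithm>

    bool modelCompliant(const V1_3::Model& model, HalVersion version) {
        const std::vector<bool> poolOk = checkPools(model.pools, version);
        const std::vector<bool> operandOk = checkOperands(model, poolOk, version);
        return std::all_of(model.main.operations.begin(), model.main.operations.end(),
                           [&](const V1_3::Operation& op) {
                               return operationCompliant(op, operandOk, version);
                           });
    }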
@@ -2586,15 +2579,17 @@
// V1_0::Model because all 1.0 drivers require strict calculation by default
// in the P NN runtime. Even if fp16 calculations are allowed, they can
// still be computed by a strict fp32 driver.
- return std::all_of(
- model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
- int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr,
- op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr,
- convertToV1_3(model.operands), HalVersion::V1_0);
- return error == ANEURALNETWORKS_NO_ERROR;
- });
+ auto operands = uncheckedConvert(convertToV1_3(model.operands));
+ return std::all_of(model.operations.begin(), model.operations.end(),
+ [&operands](const V1_1::Operation& op) {
+ int error = validateOperation(
+ static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands,
+ HalVersion::V1_0);
+ return error == ANEURALNETWORKS_NO_ERROR;
+ });
}
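Editorial note: worth noting in the rewrite above, the operand conversion is hoisted out of the per-operation lambda, so it runs once per model rather than once per operation. Schematically (validateOp is hypothetical):

    #include <algorithm>

    bool allOpsCompliant(const V1_1::Model& model) {
        // Convert the operand list once, outside the loop...
        const auto operands = uncheckedConvert(convertToV1_3(model.operands));
        // ...then every operation validates against the shared, pre-converted list.
        return std::all_of(model.operations.begin(), model.operations.end(),
                           [&](const V1_1::Operation& op) { return validateOp(op, operands); });
    }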
bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
@@ -2697,81 +2692,86 @@
.outputs = operation.outputs};
}
-static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hidl_vec<V1_3::Operation>& operations) {
- hidl_vec<V1_0::Operation> result(operations.size());
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
return result;
}
-static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hidl_vec<V1_2::Operation>& operations) {
- hidl_vec<V1_0::Operation> result(operations.size());
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
return result;
}
-static hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
- const hidl_vec<V1_3::Operation>& operations) {
- hidl_vec<V1_2::Operation> result(operations.size());
+static hardware::hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
return result;
}
-static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
- const hidl_vec<V1_2::Operation>& operations) {
- hidl_vec<V1_1::Operation> result(operations.size());
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
return result;
}
-static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
- const hidl_vec<V1_3::Operation>& operations) {
- hidl_vec<V1_1::Operation> result(operations.size());
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
return result;
}
-static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
- hidl_vec<V1_2::Operation> result(operations.size());
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_0::Operation& operation) { return convertToV1_2(operation); });
return result;
}
-static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) {
- hidl_vec<V1_2::Operation> result(operations.size());
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_1::Operation& operation) { return convertToV1_2(operation); });
return result;
}
-static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_0::Operation>& operations) {
- hidl_vec<V1_3::Operation> result(operations.size());
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_0::Operation& operation) { return convertToV1_3(operation); });
return result;
}
-static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_1::Operation>& operations) {
- hidl_vec<V1_3::Operation> result(operations.size());
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_1::Operation& operation) { return convertToV1_3(operation); });
return result;
}
-static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_2::Operation>& operations) {
- hidl_vec<V1_3::Operation> result(operations.size());
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_2::Operation& operation) { return convertToV1_3(operation); });
return result;
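Editorial note: the hidl_vec converters above and below all share one shape; a generic sketch (the change deliberately keeps explicit overloads instead):

    #include <algorithm>

    template <typename To, typename From, typename Fn>
    hardware::hidl_vec<To> mapVec(const hardware::hidl_vec<From>& in, Fn convert) {
        hardware::hidl_vec<To> out(in.size());
        std::transform(in.begin(), in.end(), out.begin(), convert);
        return out;
    }

    // e.g. mapVec<V1_3::Operation>(ops,
    //          [](const V1_2::Operation& op) { return convertToV1_3(op); });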
@@ -2817,19 +2817,19 @@
return static_cast<V1_0::OperandType>(operandType);
}
-bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime) {
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) {
return true;
}
-bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime) {
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) {
return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
}
-bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime) {
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) {
return true;
}
-bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime) {
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) {
return true;
}
@@ -2919,57 +2919,57 @@
return operand;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_0::Operand>& operands) {
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands) {
return operands;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
- hidl_vec<V1_0::Operand> result(operands.size());
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ hardware::hidl_vec<V1_0::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_2::Operand& operand) { return convertToV1_0(operand); });
return result;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_3::Operand>& operands) {
- hidl_vec<V1_0::Operand> result(operands.size());
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ hardware::hidl_vec<V1_0::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_3::Operand& operand) { return convertToV1_0(operand); });
return result;
}
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
- hidl_vec<V1_2::Operand> result(operands.size());
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ hardware::hidl_vec<V1_2::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_0::Operand& operand) { return convertToV1_2(operand); });
return result;
}
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) {
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands) {
return operands;
}
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_3::Operand>& operands) {
- hidl_vec<V1_2::Operand> result(operands.size());
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ hardware::hidl_vec<V1_2::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_3::Operand& operand) { return convertToV1_2(operand); });
return result;
}
-hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_0::Operand>& operands) {
- hidl_vec<V1_3::Operand> result(operands.size());
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ hardware::hidl_vec<V1_3::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_0::Operand& operand) { return convertToV1_3(operand); });
return result;
}
-hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_2::Operand>& operands) {
- hidl_vec<V1_3::Operand> result(operands.size());
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ hardware::hidl_vec<V1_3::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_2::Operand& operand) { return convertToV1_3(operand); });
return result;
}
-hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_3::Operand>& operands) {
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands) {
return operands;
}
@@ -3158,16 +3158,16 @@
});
}
-static hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
+static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
switch (pool.getDiscriminator()) {
case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
return pool.hidlMemory();
case V1_3::Request::MemoryPool::hidl_discriminator::token:
- return hidl_memory{};
+ return hardware::hidl_memory{};
}
}
-static V1_3::Request::MemoryPool convertToV1_3(const hidl_memory& pool) {
+static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) {
V1_3::Request::MemoryPool ret;
ret.hidlMemory(pool);
return ret;
@@ -3178,7 +3178,7 @@
}
static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
- hidl_vec<hidl_memory> pools(request.pools.size());
+ hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size());
std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
[](const auto& pool) { return convertToV1_0(pool); });
return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
@@ -3201,7 +3201,7 @@
}
V1_3::Request convertToV1_3(const V1_0::Request& request) {
- hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
+ hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
[](const auto& pool) { return convertToV1_3(pool); });
return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
@@ -3257,5 +3257,293 @@
}
#endif // NN_DEBUGGABLE
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) {
+ return nnTryGetValue(convert(status));
+}
+
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) {
+ return nnTryGetValue(convert(status));
+}
+
+OperandType uncheckedConvert(V1_3::OperandType operandType) {
+ return nnTryGetValue(convert(operandType));
+}
+
+OperationType uncheckedConvert(V1_3::OperationType operationType) {
+    return nnTryGetValue(convert(operationType));
+}
+
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) {
+ return nnTryGetValue(convert(lifetime));
+}
+
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) {
+ return nnTryGetValue(convert(measure));
+}
+
+DataLocation uncheckedConvert(const V1_0::DataLocation& location) {
+ return nnTryGetValue(convert(location));
+}
+
+Operand uncheckedConvert(const V1_3::Operand& operand) {
+ return nnTryGetValue(convert(operand));
+}
+
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) {
+ return nnTryGetValue(convert(params));
+}
+
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) {
+ return nnTryGetValue(convert(params));
+}
+
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) {
+ return params;
+}
+
+Operation uncheckedConvert(const V1_3::Operation& operation) {
+ return nnTryGetValue(convert(operation));
+}
+
+template <typename CanonicalType, typename HalType>
+static std::vector<CanonicalType> convertVec(const hardware::hidl_vec<HalType>& items) {
+ std::vector<CanonicalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const HalType& item) { return uncheckedConvert(item); });
+ return result;
+}
+
+Model uncheckedConvert(const V1_3::Model& model) {
+ return nnTryGetValue(convert(model));
+}
+
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) {
+ return nnTryGetValue(convert(subgraph));
+}
+
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) {
+ return nnTryGetValue(convert(x));
+}
+
+Request uncheckedConvert(const V1_3::Request& request) {
+ return nnTryGetValue(convert(request));
+}
+
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) {
+ return nnTryGetValue(convert(requestArgument));
+}
+
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) {
+ return nnTryGetValue(convert(memoryPool));
+}
+
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) {
+ return nnTryGetValue(convert(outputShape));
+}
+
+std::vector<OutputShape> uncheckedConvert(
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
+ return convertVec<OutputShape>(outputShapes);
+}
+
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) {
+ return nnTryGetValue(convert(capabilities));
+}
+
+Capabilities::OperandPerformance uncheckedConvert(
+ const V1_3::Capabilities::OperandPerformance& operandPerformance) {
+ return nnTryGetValue(convert(operandPerformance));
+}
+
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
+ return nnTryGetValue(convert(performanceInfo));
+}
+
+Extension uncheckedConvert(const V1_2::Extension& extension) {
+ return nnTryGetValue(convert(extension));
+}
+
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
+ return convertVec<Extension>(extensions);
+}
+
+Extension::OperandTypeInformation uncheckedConvert(
+ const V1_2::Extension::OperandTypeInformation& info) {
+ return nnTryGetValue(convert(info));
+}
+
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
+ return nnTryGetValue(convert(timeoutDuration));
+}
+
+Timing uncheckedConvert(const V1_2::Timing& timing) {
+ return nnTryGetValue(convert(timing));
+}
+
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
+ return static_cast<V1_0::ErrorStatus>(static_cast<int>(status));
+}
+
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
+ return nnTryGetValue(V1_3::utils::convert(status));
+}
+
+V1_3::OperandType convertToV1_3(OperandType operandType) {
+ return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperationType convertToV1_3(OperationType operationType) {
+    return nnTryGetValue(V1_3::utils::convert(operationType));
+}
+
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
+ return nnTryGetValue(V1_3::utils::convert(lifetime));
+}
+
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
+ return nnTryGetValue(V1_1::utils::convert(preference));
+}
+
+V1_3::Priority convertToV1_3(Priority priority) {
+ return nnTryGetValue(V1_3::utils::convert(priority));
+}
+
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
+ return nnTryGetValue(V1_2::utils::convert(measure));
+}
+
+V1_0::DataLocation convertToV1_0(const DataLocation& location) {
+ return nnTryGetValue(V1_0::utils::convert(location));
+}
+
+V1_3::Operand convertToV1_3(const Operand& operand) {
+ return nnTryGetValue(V1_3::utils::convert(operand));
+}
+
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
+ return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
+ return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_2(const Operand::ExtensionParams& params) {
+    return params;
+}
+
+V1_3::Operation convertToV1_3(const Operation& operation) {
+ return nnTryGetValue(V1_3::utils::convert(operation));
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_0(item); });
+ return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_2(item); });
+ return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_3(item); });
+ return result;
+}
+
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) {
+ return nnTryGetValue(V1_2::utils::convert(outputShape));
+}
+
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) {
+ return convertVecToV1_2<V1_2::OutputShape>(outputShapes);
+}
+
+V1_3::Model convertToV1_3(const Model& model) {
+ return nnTryGetValue(V1_3::utils::convert(model));
+}
+
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) {
+ return nnTryGetValue(V1_3::utils::convert(subgraph));
+}
+
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) {
+ return nnTryGetValue(V1_2::utils::convert(x));
+}
+
+V1_3::Request convertToV1_3(const Request& request) {
+ return nnTryGetValue(V1_3::utils::convert(request));
+}
+
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) {
+ return nnTryGetValue(V1_0::utils::convert(requestArgument));
+}
+
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) {
+ return nnTryGetValue(V1_3::utils::convert(memoryPool));
+}
+
+std::vector<Request::MemoryPool> uncheckedConvert(
+ const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) {
+ return convertVec<Request::MemoryPool>(memoryPools);
+}
+
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) {
+ return nnTryGetValue(V1_3::utils::convert(timePoint));
+}
+
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) {
+ return nnTryGetValue(V1_3::utils::convert(timeoutDuration));
+}
+
+V1_2::Timing convertToV1_2(const Timing& timing) {
+ return nnTryGetValue(V1_2::utils::convert(timing));
+}
+
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) {
+ return nnTryGetValue(V1_3::utils::convert(bufferRole));
+}
+
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) {
+ return convertVecToV1_3<V1_3::BufferRole>(bufferRoles);
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) {
+ return nnTryGetValue(V1_0::utils::convert(operandValues));
+}
+
+hardware::hidl_memory convertToV1_0(const Memory& memory) {
+ return nnTryGetValue(V1_0::utils::convert(memory));
+}
+
+Memory uncheckedConvert(const hardware::hidl_memory& memory) {
+ return nnTryGetValue(convert(memory));
+}
+
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories) {
+ return convertVecToV1_0<hardware::hidl_memory>(memories);
+}
+
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories) {
+ return convertVec<Memory>(memories);
+}
+
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) {
+ return convertVec<Model::Subgraph>(subgraphs);
+}
+
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ return convertVec<Operand>(operands);
+}
+
} // namespace nn
} // namespace android
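A note on the conversion pattern added above: every uncheckedConvert() overload funnels through nnTryGetValue(), so a malformed HAL structure fails loudly at the HIDL boundary instead of propagating as a half-converted canonical object. A minimal sketch of that helper, assuming the checked convert() functions return a value-or-error Result<Type>; the exact wrapper type and failure message in the tree may differ:

    template <typename Type>
    static Type nnTryGetValue(Result<Type> result) {
        // Assumption: Result<Type> is an expected-style wrapper with
        // has_value()/value()/error(); abort the process on conversion failure.
        CHECK(result.has_value()) << "Conversion failed: " << result.error();
        return std::move(result).value();
    }

    // Typical call site, HAL -> canonical:
    //   const Operand canonical = uncheckedConvert(halOperand);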
diff --git a/common/UtilsTest.cpp b/common/UtilsTest.cpp
index 291a710..8bc8f24 100644
--- a/common/UtilsTest.cpp
+++ b/common/UtilsTest.cpp
@@ -20,16 +20,18 @@
#include <utility>
#include <vector>
+#include "HalInterfaces.h"
#include "MemoryUtils.h"
#include "OperationsUtils.cpp"
#include "QuantUtils.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
namespace wrapper {
namespace {
-using namespace hal;
using ::testing::ElementsAreArray;
} // namespace
@@ -62,9 +64,8 @@
}
static int32_t getExtensionType(uint16_t extensionPrefix, uint16_t typeWithinExtension) {
- constexpr uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
- int32_t type = (extensionPrefix << kLowBitsType) | typeWithinExtension;
- EXPECT_TRUE(isExtensionOperandType(static_cast<OperandType>(type)));
+ int32_t type = (extensionPrefix << kExtensionTypeBits) | typeWithinExtension;
+ EXPECT_TRUE(isExtensionOperandType(static_cast<V1_3::OperandType>(type)));
return type;
}
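The rewritten getExtensionType() uses the shared kExtensionTypeBits constant instead of deriving it locally from the V1_2 enum. A worked example of the encoding, assuming kExtensionTypeBits matches the V1_2 ExtensionTypeEncoding::LOW_BITS_TYPE value of 16 (extension prefix in the high bits, type-within-extension in the low bits; the values below are made up):

    constexpr uint16_t kPrefix = 0x0010;               // hypothetical extension prefix
    constexpr uint16_t kTypeWithinExtension = 0x0003;  // hypothetical type id
    const int32_t type = (kPrefix << 16) | kTypeWithinExtension;  // == 0x00100003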
@@ -128,7 +129,7 @@
}
TEST(ValidateRequestTest, UnknownOutputRank) {
- Request::MemoryPool pool;
+ V1_3::Request::MemoryPool pool;
pool.hidlMemory(allocateSharedMemory(2 * sizeof(float)));
ASSERT_TRUE(pool.hidlMemory().valid());
const V1_3::Model model = {
@@ -170,7 +171,7 @@
}
TEST(ValidateRequestTest, ScalarOutput) {
- Request::MemoryPool pool;
+ V1_3::Request::MemoryPool pool;
pool.hidlMemory(allocateSharedMemory(sizeof(float) + sizeof(int32_t)));
ASSERT_TRUE(pool.hidlMemory().valid());
const V1_3::Model model = {
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp
index 46f9b2f..c4e5f96 100644
--- a/common/ValidateHal.cpp
+++ b/common/ValidateHal.cpp
@@ -29,12 +29,11 @@
#include "OperationsUtils.h"
#include "Tracing.h"
#include "Utils.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-using namespace hal;
-
template <class T_Model>
struct ModelToHalVersion;
template <>
@@ -56,27 +55,27 @@
class MemoryAccessVerifier {
public:
- MemoryAccessVerifier(const hidl_vec<hidl_memory>& pools)
+ MemoryAccessVerifier(const hardware::hidl_vec<hardware::hidl_memory>& pools)
: mPoolCount(pools.size()), mPoolSizes(mPoolCount) {
for (size_t i = 0; i < mPoolCount; i++) {
mPoolSizes[i] = pools[i].size();
}
}
- MemoryAccessVerifier(const hidl_vec<V1_3::Request::MemoryPool>& pools)
+ MemoryAccessVerifier(const hardware::hidl_vec<V1_3::Request::MemoryPool>& pools)
: mPoolCount(pools.size()), mPoolSizes(mPoolCount) {
for (size_t i = 0; i < mPoolCount; i++) {
switch (pools[i].getDiscriminator()) {
- case Request::MemoryPool::hidl_discriminator::hidlMemory:
+ case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
mPoolSizes[i] = pools[i].hidlMemory().size();
break;
- case Request::MemoryPool::hidl_discriminator::token:
+ case V1_3::Request::MemoryPool::hidl_discriminator::token:
// Set size to 0 to enforce length == 0 && offset == 0.
mPoolSizes[i] = 0;
break;
}
}
}
- bool validate(const DataLocation& location) const {
+ bool validate(const V1_0::DataLocation& location) const {
if (location.poolIndex >= mPoolCount) {
LOG(ERROR) << "Invalid poolIndex " << location.poolIndex << "/" << mPoolCount;
return false;
@@ -99,29 +98,29 @@
static bool validateOperandExtraParams(const V1_3::Operand& operand, uint32_t index) {
switch (operand.type) {
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_INT32:
- case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
- case OperandType::TENSOR_QUANT16_SYMM:
- case OperandType::TENSOR_BOOL8: {
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_BOOL8: {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::none)
+ V1_2::Operand::ExtraParams::hidl_discriminator::none)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type)
<< " has incorrect extraParams: " << toString(operand.extraParams);
} break;
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::channelQuant)
+ V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " without a Channel Quantization params";
auto& channelQuant = operand.extraParams.channelQuant();
@@ -151,9 +150,9 @@
default: {
if (isExtensionOperandType(operand.type)) {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::extension ||
+ V1_2::Operand::ExtraParams::hidl_discriminator::extension ||
operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::none)
+ V1_2::Operand::ExtraParams::hidl_discriminator::none)
<< "Operand " << index << ": Extension operand of type "
<< getOperandTypeName(operand.type)
<< " has incorrect extraParams: " << toString(operand.extraParams);
@@ -165,10 +164,11 @@
}
template <typename VersionedOperand>
-static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
- const hidl_vec<uint8_t>& operandValues,
- const hidl_vec<hidl_memory>& pools,
- const hidl_vec<Subgraph>& subgraphs, bool allowUnspecifiedRank) {
+static bool validateOperands(const hardware::hidl_vec<VersionedOperand>& operands,
+ const hardware::hidl_vec<uint8_t>& operandValues,
+ const hardware::hidl_vec<hardware::hidl_memory>& pools,
+ const hardware::hidl_vec<V1_3::Subgraph>& subgraphs,
+ bool allowUnspecifiedRank) {
uint32_t index = 0;
MemoryAccessVerifier poolVerifier(pools);
for (auto& versionedOperand : operands) {
@@ -182,13 +182,13 @@
V1_3::Operand operand = convertToV1_3(versionedOperand);
// Validate type and dimensions.
switch (operand.type) {
- case OperandType::FLOAT16:
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::OEM: {
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::OEM: {
size_t count = operand.dimensions.size();
if (count != 0) {
LOG(ERROR) << "Operand " << index << ": Scalar data has dimensions of rank "
@@ -197,19 +197,20 @@
}
break;
}
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_INT32:
- case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
- case OperandType::TENSOR_QUANT16_SYMM:
- case OperandType::TENSOR_BOOL8:
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
- case OperandType::TENSOR_OEM_BYTE: {
- if ((!allowUnspecifiedRank || operand.lifetime == OperandLifeTime::CONSTANT_COPY ||
- operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) &&
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_OEM_BYTE: {
+ if ((!allowUnspecifiedRank ||
+ operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY ||
+ operand.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE) &&
operand.dimensions.size() == 0) {
LOG(ERROR) << "Operand " << index << ": Tensor has dimensions of rank 0";
return false;
@@ -227,16 +228,16 @@
// Validate the scale.
switch (operand.type) {
- case OperandType::FLOAT16:
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_BOOL8:
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
if (operand.scale != 0.f) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-zero scale ("
@@ -244,7 +245,7 @@
return false;
}
break;
- case OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_INT32:
// TENSOR_INT32 may be used with or without scale, depending on the operation.
if (operand.scale < 0.f) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
@@ -252,11 +253,11 @@
return false;
}
break;
- case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
- case OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
if (operand.scale <= 0.f) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-positive scale";
@@ -277,18 +278,18 @@
// Validate the zeroPoint.
switch (operand.type) {
- case OperandType::FLOAT16:
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_INT32:
- case OperandType::TENSOR_BOOL8:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
if (operand.zeroPoint != 0) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-zero zeroPoint "
@@ -296,7 +297,7 @@
return false;
}
break;
- case OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
if (operand.zeroPoint < 0 || operand.zeroPoint > 255) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with an invalid zeroPoint "
@@ -304,7 +305,7 @@
return false;
}
break;
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
if (operand.zeroPoint < -128 || operand.zeroPoint > 127) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with an invalid zeroPoint "
@@ -312,7 +313,7 @@
return false;
}
break;
- case OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
if (operand.zeroPoint < 0 || operand.zeroPoint > 65535) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with an invalid zeroPoint "
@@ -320,7 +321,7 @@
return false;
}
break;
- case OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
if (operand.zeroPoint != 0) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-zero zeroPoint "
@@ -342,9 +343,9 @@
NN_RET_CHECK(validateOperandExtraParams(operand, index));
// Validate the lifetime and the location.
- const DataLocation& location = operand.location;
+ const V1_0::DataLocation& location = operand.location;
switch (operand.lifetime) {
- case OperandLifeTime::CONSTANT_COPY:
+ case V1_3::OperandLifeTime::CONSTANT_COPY:
if (location.poolIndex != 0) {
LOG(ERROR) << "Operand " << index
<< ": CONSTANT_COPY with a non-zero poolIndex "
@@ -360,15 +361,15 @@
return false;
}
break;
- case OperandLifeTime::CONSTANT_REFERENCE:
+ case V1_3::OperandLifeTime::CONSTANT_REFERENCE:
if (!poolVerifier.validate(location)) {
return false;
}
break;
- case OperandLifeTime::TEMPORARY_VARIABLE:
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
- case OperandLifeTime::NO_VALUE:
+ case V1_3::OperandLifeTime::TEMPORARY_VARIABLE:
+ case V1_3::OperandLifeTime::SUBGRAPH_INPUT:
+ case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT:
+ case V1_3::OperandLifeTime::NO_VALUE:
if (location.poolIndex != 0 || location.offset != 0 || location.length != 0) {
LOG(ERROR) << "Operand " << index << ": Unexpected poolIndex "
<< location.poolIndex << ", offset " << location.offset
@@ -377,14 +378,14 @@
return false;
}
break;
- case OperandLifeTime::SUBGRAPH: {
+ case V1_3::OperandLifeTime::SUBGRAPH: {
if (location.poolIndex != 0) {
LOG(ERROR) << "Operand " << index << ": SUBGRAPH with a non-zero poolIndex "
<< location.poolIndex;
return false;
}
if (location.offset >= subgraphs.size()) {
- LOG(ERROR) << "Subgraph index out of range: " << location.offset
+ LOG(ERROR) << "Model::Subgraph index out of range: " << location.offset
<< " >= " << subgraphs.size();
return false;
}
@@ -401,8 +402,8 @@
}
// Make sure SUBGRAPH operand type and lifetime always go together.
- if ((operand.type == OperandType::SUBGRAPH) !=
- (operand.lifetime == OperandLifeTime::SUBGRAPH)) {
+ if ((operand.type == V1_3::OperandType::SUBGRAPH) !=
+ (operand.lifetime == V1_3::OperandLifeTime::SUBGRAPH)) {
LOG(ERROR) << "Operand " << index << ": Operand of type " << toString(operand.type)
<< " cannot have lifetime " << toString(operand.lifetime);
return false;
@@ -410,10 +411,10 @@
// For constants, validate that the length is as expected. The other lifetimes
// expect the length to be 0. Don't validate for OEM types.
- if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
- operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
- if (!isExtensionOperandType(operand.type) && operand.type != OperandType::OEM &&
- operand.type != OperandType::TENSOR_OEM_BYTE) {
+ if (operand.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE ||
+ operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY) {
+ if (!isExtensionOperandType(operand.type) && operand.type != V1_3::OperandType::OEM &&
+ operand.type != V1_3::OperandType::TENSOR_OEM_BYTE) {
uint32_t expectedLength = nonExtensionOperandSizeOfData(operand);
if (location.length != expectedLength) {
LOG(ERROR) << "Operand " << index << ": For operand " << toString(operand)
@@ -446,19 +447,22 @@
}
template <typename VersionedOperation>
-static bool validateOperations(const hidl_vec<VersionedOperation>& operations,
- const hidl_vec<Operand>& operands,
- const hidl_vec<Subgraph>& subgraphs, ValidationMode mode) {
- auto isValidSubgraphReference = [&subgraphs](const Operand& modelOperand) -> bool {
+static bool validateOperations(const hardware::hidl_vec<VersionedOperation>& operations,
+ const hardware::hidl_vec<V1_3::Operand>& operands,
+ const hardware::hidl_vec<V1_3::Subgraph>& subgraphs,
+ ValidationMode mode) {
+ auto canonicalSubgraphs = uncheckedConvert(subgraphs);
+ auto isValidSubgraphReference = [&canonicalSubgraphs](const Operand& modelOperand) -> bool {
NN_RET_CHECK(modelOperand.type == OperandType::SUBGRAPH)
- << "Unexpected operand type: " << toString(modelOperand.type);
- NN_RET_CHECK_LT(modelOperand.location.offset, subgraphs.size())
+ << "Unexpected operand type: " << modelOperand.type;
+ NN_RET_CHECK_LT(modelOperand.location.offset, canonicalSubgraphs.size())
<< "Invalid subgraph reference";
return true;
};
- auto getSubgraph = [&subgraphs](const Operand& modelOperand) -> const Subgraph* {
- CHECK_LT(modelOperand.location.offset, subgraphs.size());
- return &subgraphs[modelOperand.location.offset];
+ auto getSubgraph =
+ [&canonicalSubgraphs](const Operand& modelOperand) -> const Model::Subgraph* {
+ CHECK_LT(modelOperand.location.offset, canonicalSubgraphs.size());
+ return &canonicalSubgraphs[modelOperand.location.offset];
};
auto getInputCount = [&getSubgraph](const Operand& modelOperand) -> uint32_t {
return getSubgraph(modelOperand)->inputIndexes.size();
@@ -468,32 +472,33 @@
};
auto getInputOperand = [&getSubgraph](const Operand& modelOperand,
uint32_t index) -> const Operand* {
- const Subgraph& subgraph = *getSubgraph(modelOperand);
+ const Model::Subgraph& subgraph = *getSubgraph(modelOperand);
CHECK_LT(subgraph.inputIndexes[index], subgraph.operands.size());
return &subgraph.operands[subgraph.inputIndexes[index]];
};
auto getOutputOperand = [&getSubgraph](const Operand& modelOperand,
uint32_t index) -> const Operand* {
- const Subgraph& subgraph = *getSubgraph(modelOperand);
+ const Model::Subgraph& subgraph = *getSubgraph(modelOperand);
CHECK_LT(subgraph.outputIndexes[index], subgraph.operands.size());
return &subgraph.operands[subgraph.outputIndexes[index]];
};
for (auto& op : operations) {
// TODO Validate the shapes and any known values. This is currently
// done in CpuExecutor but should be done here for all drivers.
- int error = validateOperation(
- static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands, getHalVersion(op),
- {.isValidSubgraphReference = isValidSubgraphReference,
- .getSubgraphInputCount = getInputCount,
- .getSubgraphOutputCount = getOutputCount,
- .getSubgraphInputOperand = getInputOperand,
- .getSubgraphOutputOperand = getOutputOperand,
- // 1.3 HAL does not support CF operations with operands of
- // unknown size. See http://b/132458982#comment63.
- .allowControlFlowOperationWithOperandOfUnknownSize =
- mode == ValidationMode::RUNTIME});
+ int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+ uncheckedConvert(operands), getHalVersion(op),
+ {.isValidSubgraphReference = isValidSubgraphReference,
+ .getSubgraphInputCount = getInputCount,
+ .getSubgraphOutputCount = getOutputCount,
+ .getSubgraphInputOperand = getInputOperand,
+ .getSubgraphOutputOperand = getOutputOperand,
+ // 1.3 HAL does not support CF operations with operands of
+ // unknown size. See http://b/132458982#comment63.
+ .allowControlFlowOperationWithOperandOfUnknownSize =
+ mode == ValidationMode::RUNTIME});
if (error != ANEURALNETWORKS_NO_ERROR) {
LOG(ERROR) << "Invalid operation " << toString(op.type);
return false;
@@ -503,9 +508,9 @@
// but it is retained here in order to emit more informative
// error messages.
for (uint32_t i : op.outputs) {
- const Operand& operand = operands[i];
- if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE &&
- operand.lifetime != OperandLifeTime::SUBGRAPH_OUTPUT) {
+ const V1_3::Operand& operand = operands[i];
+ if (operand.lifetime != V1_3::OperandLifeTime::TEMPORARY_VARIABLE &&
+ operand.lifetime != V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) {
LOG(ERROR) << "Writing to operand " << i << " with incompatible lifetime "
<< toString(operand.lifetime);
return false;
@@ -515,7 +520,7 @@
return true;
}
-bool validatePool(const hidl_memory& pool, HalVersion ver) {
+bool validatePool(const hardware::hidl_memory& pool, HalVersion ver) {
const auto& name = pool.name();
if (name != "ashmem" && name != "mmap_fd" &&
((ver < HalVersion::V1_2) ||
@@ -532,9 +537,9 @@
bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver) {
switch (pool.getDiscriminator()) {
- case Request::MemoryPool::hidl_discriminator::hidlMemory:
+ case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
return validatePool(pool.hidlMemory(), ver);
- case Request::MemoryPool::hidl_discriminator::token:
+ case V1_3::Request::MemoryPool::hidl_discriminator::token:
return pool.token() > 0;
}
LOG(FATAL) << "unknown MemoryPool discriminator";
@@ -542,20 +547,21 @@
}
template <class T_MemoryPool>
-static bool validatePools(const hidl_vec<T_MemoryPool>& pools, HalVersion ver) {
+static bool validatePools(const hardware::hidl_vec<T_MemoryPool>& pools, HalVersion ver) {
return std::all_of(pools.begin(), pools.end(),
[ver](const auto& pool) { return validatePool(pool, ver); });
}
-static bool validateModelInputOutputs(const hidl_vec<uint32_t> indexes,
- const hidl_vec<Operand>& operands, OperandLifeTime lifetime) {
+static bool validateModelInputOutputs(const hardware::hidl_vec<uint32_t> indexes,
+ const hardware::hidl_vec<V1_3::Operand>& operands,
+ V1_3::OperandLifeTime lifetime) {
const size_t operandCount = operands.size();
for (uint32_t i : indexes) {
if (i >= operandCount) {
LOG(ERROR) << "Model input or output index out of range: " << i << "/" << operandCount;
return false;
}
- const Operand& operand = operands[i];
+ const V1_3::Operand& operand = operands[i];
if (operand.lifetime != lifetime) {
LOG(ERROR) << "Model input or output operand " << i << " has lifetime of "
<< toString(operand.lifetime) << " instead of the expected "
@@ -596,12 +602,12 @@
// mark known operands
for (size_t i = 0; i < model.operands.size(); ++i) {
const auto& operand = model.operands[i];
- const OperandLifeTime lifetime = convertToV1_3(operand.lifetime);
- operandValueKnown[i] = lifetime == OperandLifeTime::SUBGRAPH_INPUT ||
- lifetime == OperandLifeTime::CONSTANT_COPY ||
- lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
- lifetime == OperandLifeTime::NO_VALUE ||
- lifetime == OperandLifeTime::SUBGRAPH;
+ const V1_3::OperandLifeTime lifetime = convertToV1_3(operand.lifetime);
+ operandValueKnown[i] = lifetime == V1_3::OperandLifeTime::SUBGRAPH_INPUT ||
+ lifetime == V1_3::OperandLifeTime::CONSTANT_COPY ||
+ lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE ||
+ lifetime == V1_3::OperandLifeTime::NO_VALUE ||
+ lifetime == V1_3::OperandLifeTime::SUBGRAPH;
}
// Validate that operations are sorted into execution order.
@@ -672,8 +678,8 @@
LOG(ERROR) << "Model contains a circular subgraph reference";
return false;
}
- for (const Operand& operand : subgraph.operands) {
- if (operand.lifetime == OperandLifeTime::SUBGRAPH) {
+ for (const V1_3::Operand& operand : subgraph.operands) {
+ if (operand.lifetime == V1_3::OperandLifeTime::SUBGRAPH) {
uint32_t refSubgraphIndex = operand.location.offset;
if (!checkNoReferenceCycles(model, model.referenced[refSubgraphIndex], path)) {
return false;
@@ -699,14 +705,14 @@
}
// We only need versioned operands for their validation. For all the other
// validations we can use operands upcasted to the latest version.
- const hidl_vec<Operand> latestVersionOperands = convertToV1_3(model.operands);
+ const hardware::hidl_vec<V1_3::Operand> latestVersionOperands = convertToV1_3(model.operands);
return (validateOperands(model.operands, model.operandValues, model.pools, /*subgraphs=*/{},
/*allowUnspecifiedRank=*/version >= HalVersion::V1_2) &&
validateOperations(model.operations, latestVersionOperands, /*subgraphs=*/{}, mode) &&
validateModelInputOutputs(model.inputIndexes, latestVersionOperands,
- OperandLifeTime::SUBGRAPH_INPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_INPUT) &&
validateModelInputOutputs(model.outputIndexes, latestVersionOperands,
- OperandLifeTime::SUBGRAPH_OUTPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) &&
validatePools(model.pools, version) && validateGraph(model));
}
@@ -721,15 +727,15 @@
LOG(ERROR) << "Invalid empty model.";
return false;
}
- auto validateSubgraph = [&model, mode](const Subgraph& subgraph) -> bool {
+ auto validateSubgraph = [&model, mode](const V1_3::Subgraph& subgraph) -> bool {
return (validateOperands(subgraph.operands, model.operandValues, model.pools,
model.referenced, /*allowUnspecifiedRank=*/true) &&
validateOperations(subgraph.operations, subgraph.operands, model.referenced,
mode) &&
validateModelInputOutputs(subgraph.inputIndexes, subgraph.operands,
- OperandLifeTime::SUBGRAPH_INPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_INPUT) &&
validateModelInputOutputs(subgraph.outputIndexes, subgraph.operands,
- OperandLifeTime::SUBGRAPH_OUTPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) &&
validateGraph(subgraph));
};
return (validateSubgraph(model.main) &&
@@ -740,11 +746,11 @@
// Validates the arguments of a request. type is either "input" or "output" and is used
// for printing error messages. The operandIndexes is the appropriate array of input
// or output operand indexes that was passed to the ANeuralNetworksModel_identifyInputsAndOutputs.
-static bool validateRequestArguments(const hidl_vec<RequestArgument>& requestArguments,
- const hidl_vec<uint32_t>& operandIndexes,
- const hidl_vec<Operand>& operands,
- const MemoryAccessVerifier& poolVerifier,
- bool allowUnspecified, const char* type) {
+static bool validateRequestArguments(
+ const hardware::hidl_vec<V1_0::RequestArgument>& requestArguments,
+ const hardware::hidl_vec<uint32_t>& operandIndexes,
+ const hardware::hidl_vec<V1_3::Operand>& operands, const MemoryAccessVerifier& poolVerifier,
+ bool allowUnspecified, const char* type) {
// The request should specify as many arguments as were described in the model.
const size_t requestArgumentCount = requestArguments.size();
if (requestArgumentCount != operandIndexes.size()) {
@@ -754,13 +760,13 @@
}
for (size_t requestArgumentIndex = 0; requestArgumentIndex < requestArgumentCount;
requestArgumentIndex++) {
- const RequestArgument& requestArgument = requestArguments[requestArgumentIndex];
- const DataLocation& location = requestArgument.location;
+ const V1_0::RequestArgument& requestArgument = requestArguments[requestArgumentIndex];
+ const V1_0::DataLocation& location = requestArgument.location;
// Get the operand index for this argument. We extract it from the list
// that was provided in the call to ANeuralNetworksModel_identifyInputsAndOutputs.
// We assume in this function that the model has been validated already.
const uint32_t operandIndex = operandIndexes[requestArgumentIndex];
- const Operand& operand = operands[operandIndex];
+ const V1_3::Operand& operand = operands[operandIndex];
if (requestArgument.hasNoValue) {
if (location.poolIndex != 0 || location.offset != 0 || location.length != 0 ||
requestArgument.dimensions.size() != 0) {
@@ -861,9 +867,9 @@
}
bool validateMemoryDesc(const V1_3::BufferDesc& desc,
- const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<V1_3::BufferRole>& inputRoles,
- const hidl_vec<V1_3::BufferRole>& outputRoles,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
std::set<PreparedModelRole>* preparedModelRoles,
V1_3::Operand* combinedOperand) {
@@ -939,14 +945,15 @@
return true;
}
-bool validateExecutionPreference(ExecutionPreference preference) {
- return preference == ExecutionPreference::LOW_POWER ||
- preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
- preference == ExecutionPreference::SUSTAINED_SPEED;
+bool validateExecutionPreference(V1_1::ExecutionPreference preference) {
+ return preference == V1_1::ExecutionPreference::LOW_POWER ||
+ preference == V1_1::ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == V1_1::ExecutionPreference::SUSTAINED_SPEED;
}
-bool validatePriority(Priority priority) {
- return priority == Priority::LOW || priority == Priority::MEDIUM || priority == Priority::HIGH;
+bool validatePriority(V1_3::Priority priority) {
+ return priority == V1_3::Priority::LOW || priority == V1_3::Priority::MEDIUM ||
+ priority == V1_3::Priority::HIGH;
}
bool validOperandType(V1_0::OperandType operandType) {
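MemoryAccessVerifier keeps its behavior under the explicitly versioned signatures above; a usage sketch against a V1_3 model (the location values are hypothetical):

    // Build a verifier over the model's memory pools, then bounds-check a
    // candidate operand location against them.
    MemoryAccessVerifier verifier(model.pools);
    const V1_0::DataLocation location = {.poolIndex = 0, .offset = 0, .length = 64};
    if (!verifier.validate(location)) {
        // Reject: the location does not fit inside pool 0.
    }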
diff --git a/common/include/BufferTracker.h b/common/include/BufferTracker.h
index feabda6..60432ca 100644
--- a/common/include/BufferTracker.h
+++ b/common/include/BufferTracker.h
@@ -37,23 +37,23 @@
class ManagedBuffer {
public:
static std::shared_ptr<ManagedBuffer> create(uint32_t size, std::set<PreparedModelRole> roles,
- const hal::Operand& operand);
+ const Operand& operand);
// Prefer ManagedBuffer::create.
ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
- std::set<PreparedModelRole> roles, const hal::Operand& operand);
+ std::set<PreparedModelRole> roles, const Operand& operand);
RunTimePoolInfo createRunTimePoolInfo() const {
return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize);
}
// "poolIndex" is the index of this buffer in the request.pools.
- hal::ErrorStatus validateRequest(uint32_t poolIndex, const hal::Request& request,
- const hal::IPreparedModel* preparedModel) const;
+ ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
+ const V1_3::IPreparedModel* preparedModel) const;
// "size" is the byte size of the hidl_memory provided to the copyFrom or copyTo method.
- hal::ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
- hal::ErrorStatus validateCopyTo(uint32_t size) const;
+ ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
+ ErrorStatus validateCopyTo(uint32_t size) const;
bool updateDimensions(const std::vector<uint32_t>& dimensions);
void setInitialized(bool initialized);
@@ -63,7 +63,7 @@
const std::unique_ptr<uint8_t[]> kBuffer;
const uint32_t kSize;
const std::set<PreparedModelRole> kRoles;
- const hal::OperandType kOperandType;
+ const OperandType kOperandType;
const std::vector<uint32_t> kInitialDimensions;
std::vector<uint32_t> mUpdatedDimensions;
bool mInitialized = false;
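With ManagedBuffer now declared against canonical types, an allocation for a single input role looks like the sketch below ("preparedModel" and "operand" are placeholders for a driver's prepared-model pointer and a canonical Operand):

    // One role: this buffer backs input 0 of preparedModel.
    std::set<PreparedModelRole> roles = {{preparedModel, IOType::INPUT, 0}};
    std::shared_ptr<ManagedBuffer> buffer =
            ManagedBuffer::create(/*size=*/1024, std::move(roles), operand);
    if (buffer != nullptr) {
        const RunTimePoolInfo poolInfo = buffer->createRunTimePoolInfo();
    }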
diff --git a/common/include/CpuExecutor.h b/common/include/CpuExecutor.h
index edb2332..0945729 100644
--- a/common/include/CpuExecutor.h
+++ b/common/include/CpuExecutor.h
@@ -25,10 +25,10 @@
#include <vector>
#include "ControlFlow.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -37,7 +37,7 @@
// may change during execution.
struct RunTimeOperandInfo {
// TODO Storing the type here is redundant, as it won't change during execution.
- hal::OperandType type;
+ OperandType type;
// The type and dimensions of the operand. The dimensions can
// change at runtime. We include the type because it's useful
// to pass together with the dimension to the functions implementing
@@ -64,14 +64,14 @@
// The length of the buffer.
uint32_t length;
// Whether this is a temporary variable, a model input, a constant, etc.
- hal::OperandLifeTime lifetime;
+ Operand::LifeTime lifetime;
// Keeps track of how many operations have yet to make use
// of this temporary variable. When the count is decremented to 0,
// we free the buffer. For non-temporary variables, this count is
// always 0.
uint32_t numberOfUsesLeft;
- hal::OperandExtraParams extraParams;
+ Operand::ExtraParams extraParams;
Shape shape() const {
return {
@@ -84,7 +84,7 @@
}
bool isSufficient() const {
- if (isExtensionOperandType(type)) {
+ if (isExtension(type)) {
// We don't know sizes of extension types.
return true;
}
@@ -98,19 +98,20 @@
// may reference the same region of memory by either:
// (1) copying an existing RunTimePoolInfo object, or
// (2) creating multiple RunTimePoolInfo objects from the same memory resource
-// (e.g., "createFromHidlMemory" or "createFromExistingBuffer")
+// (e.g., "createFromMemory" or "createFromExistingBuffer")
//
-// If the underlying region of memory is mapped by "createFromHidlMemory", the
+// If the underlying region of memory is mapped by "createFromMemory", the
// mapping will be sustained until it is no longer referenced by any
// RunTimePoolInfo objects.
class RunTimePoolInfo {
public:
- static std::optional<RunTimePoolInfo> createFromHidlMemory(const hal::hidl_memory& hidlMemory);
+ static std::optional<RunTimePoolInfo> createFromMemory(const Memory& memory);
static RunTimePoolInfo createFromExistingBuffer(uint8_t* buffer, uint32_t size = 0);
uint8_t* getBuffer() const;
bool flush() const;
- const hal::hidl_memory& getHidlMemory() const;
+ // TODO(b/169672209): "const Memory& getMemory() const;"
+ Memory getMemory() const;
uint32_t getSize() const;
private:
@@ -120,11 +121,20 @@
std::shared_ptr<const RunTimePoolInfoImpl> mImpl;
};
-bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
- const hal::hidl_vec<hal::hidl_memory>& pools);
+bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,
+ const std::vector<Memory>& pools);
+
+// DEPRECATED. Use setRunTimePoolInfosFromCanonicalMemories().
+//
+// Used by external code.
+inline bool setRunTimePoolInfosFromHidlMemories(
+ std::vector<RunTimePoolInfo>* poolInfos,
+ const hardware::hidl_vec<hardware::hidl_memory>& pools) {
+ return setRunTimePoolInfosFromCanonicalMemories(poolInfos, uncheckedConvert(pools));
+}
bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos,
- const hal::hidl_vec<hal::Request::MemoryPool>& pools);
+ const std::vector<Request::MemoryPool>& pools);
// This class is used to execute a model on the CPU.
class CpuExecutor {
@@ -146,11 +156,11 @@
// specified in the constructor.
// The model must outlive the executor. We prevent it from being modified
// while this is executing.
- int run(const hal::Model& model, const hal::Request& request,
+ int run(const Model& model, const Request& request,
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
- const std::vector<hal::OutputShape>& getOutputShapes() const {
+ const std::vector<OutputShape>& getOutputShapes() const {
CHECK(mFinished) << "getOutputShapes() called by an unfinished CpuExecutor.";
return mOutputShapes;
}
@@ -160,31 +170,31 @@
private:
// Creates runtime info from what's in the model.
- std::vector<RunTimeOperandInfo> initializeRunTimeInfo(const hal::Subgraph& subgraph);
+ std::vector<RunTimeOperandInfo> initializeRunTimeInfo(const Model::Subgraph& subgraph);
// Adjusts the runtime info for the arguments passed to the model,
// modifying the buffer location, and possibly the dimensions.
void updateForArguments(const std::vector<uint32_t>& indexes,
- const hal::hidl_vec<hal::RequestArgument>& arguments,
+ const std::vector<Request::Argument>& arguments,
const std::vector<RunTimePoolInfo>& requestPoolInfos,
RunTimeOperandInfo* operands);
// Runs one subgraph.
- int executeSubgraph(const hal::Subgraph& subgraph, RunTimeOperandInfo* operands);
+ int executeSubgraph(const Model::Subgraph& subgraph, RunTimeOperandInfo* operands);
// Runs one operation of the graph.
- int executeOperation(const hal::Operation& operation, RunTimeOperandInfo* operands);
- int executeIfOperation(const hal::Operation& operation, RunTimeOperandInfo* operands);
- int executeWhileOperation(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ int executeOperation(const Operation& operation, RunTimeOperandInfo* operands);
+ int executeIfOperation(const Operation& operation, RunTimeOperandInfo* operands);
+ int executeWhileOperation(const Operation& operation, RunTimeOperandInfo* operands);
void setOutputShapes(const std::vector<uint32_t>& outputIndexes,
const std::vector<RunTimeOperandInfo>& operands);
// Compile-time operand value information used by initializeRunTimeInfo.
// The fields are only valid while run() is being executed.
- const hal::hidl_vec<uint8_t>* mModelOperandValues = nullptr;
+ const uint8_t* mModelOperandValues = nullptr;
const std::vector<RunTimePoolInfo>* mModelPoolInfos = nullptr;
- const hal::hidl_vec<hal::Subgraph>* mReferencedSubgraphs = nullptr;
+ const std::vector<Model::Subgraph>* mReferencedSubgraphs = nullptr;
// The output operand shapes returning to the runtime.
- std::vector<hal::OutputShape> mOutputShapes;
+ std::vector<OutputShape> mOutputShapes;
// Whether execution is finished and mOutputShapes is ready
bool mFinished = false;
@@ -259,17 +269,16 @@
}
inline bool IsNullInput(const RunTimeOperandInfo* input) {
- return input->lifetime == hal::OperandLifeTime::NO_VALUE;
+ return input->lifetime == Operand::LifeTime::NO_VALUE;
}
-inline int NumInputsWithValues(const hal::Operation& operation,
- const RunTimeOperandInfo* operands) {
+inline int NumInputsWithValues(const Operation& operation, const RunTimeOperandInfo* operands) {
const std::vector<uint32_t>& inputs = operation.inputs;
return std::count_if(inputs.begin(), inputs.end(),
[&operands](uint32_t i) { return !IsNullInput(&operands[i]); });
}
-inline int NumOutputs(const hal::Operation& operation) {
+inline int NumOutputs(const Operation& operation) {
return operation.outputs.size();
}
@@ -281,12 +290,12 @@
return operand->shape().dimensions[i];
}
-inline RunTimeOperandInfo* GetInput(const hal::Operation& operation, RunTimeOperandInfo* operands,
+inline RunTimeOperandInfo* GetInput(const Operation& operation, RunTimeOperandInfo* operands,
int index) {
return &operands[operation.inputs[index]];
}
-inline RunTimeOperandInfo* GetOutput(const hal::Operation& operation, RunTimeOperandInfo* operands,
+inline RunTimeOperandInfo* GetOutput(const Operation& operation, RunTimeOperandInfo* operands,
int index) {
return &operands[operation.outputs[index]];
}
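Since the deprecated HIDL entry point above only converts and forwards, new call sites can target the canonical API directly; a sketch (the memories vector would be populated from the request in real code):

    std::vector<RunTimePoolInfo> requestPoolInfos;
    const std::vector<Memory> memories = {};  // filled in from the request in practice
    if (!setRunTimePoolInfosFromCanonicalMemories(&requestPoolInfos, memories)) {
        // Mapping failed; abort the execution.
    }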
diff --git a/common/include/GraphDump.h b/common/include/GraphDump.h
index 207afe5..208b4ec 100644
--- a/common/include/GraphDump.h
+++ b/common/include/GraphDump.h
@@ -17,10 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H
-#include <android/hardware/neuralnetworks/1.3/types.h>
-
#include <iostream>
+#include "nnapi/Types.h"
+
namespace android {
namespace nn {
@@ -45,8 +45,7 @@
// A model input or output (operand) is shown in "reverse colors" --
// white text on a black background.
//
-void graphDump(const char* name, const ::android::hardware::neuralnetworks::V1_3::Model& model,
- std::ostream* outStream = nullptr);
+void graphDump(const char* name, const Model& model, std::ostream* outStream = nullptr);
} // namespace nn
} // namespace android
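graphDump() keeps its output format; only the model parameter changes to the canonical type. An illustrative call, assuming "model" is a canonical nn::Model and that the nullptr default still routes output to the log, as before:

    std::ostringstream graph;
    graphDump("my_model", model, &graph);  // capture the "dot" text
    graphDump("my_model", model);          // or let it go to the default sink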
diff --git a/common/include/HalInterfaces.h b/common/include/HalInterfaces.h
index 4e3a380..8eeb23d 100644
--- a/common/include/HalInterfaces.h
+++ b/common/include/HalInterfaces.h
@@ -40,74 +40,20 @@
#include <functional>
-namespace android::nn::hal {
+namespace android::nn {
-using android::sp;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
-using hardware::hidl_death_recipient;
-using hardware::hidl_enum_range;
-using hardware::hidl_handle;
-using hardware::hidl_memory;
-using hardware::hidl_string;
-using hardware::hidl_vec;
-using hardware::Return;
-using hardware::Void;
+using HalCacheToken =
+ hardware::hidl_array<uint8_t,
+ static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using HalDeviceFactory = std::function<sp<V1_0::IDevice>(bool blocking)>;
-using hidl::memory::V1_0::IMemory;
+inline constexpr V1_3::Priority kDefaultPriority13 = V1_3::Priority::MEDIUM;
-namespace V1_0 = hardware::neuralnetworks::V1_0;
-namespace V1_1 = hardware::neuralnetworks::V1_1;
-namespace V1_2 = hardware::neuralnetworks::V1_2;
-namespace V1_3 = hardware::neuralnetworks::V1_3;
-
-using V1_0::DataLocation;
-using V1_0::DeviceStatus;
-using V1_0::FusedActivationFunc;
-using V1_0::PerformanceInfo;
-using V1_0::RequestArgument;
-using V1_1::ExecutionPreference;
-using V1_2::Constant;
-using V1_2::DeviceType;
-using V1_2::Extension;
-using V1_2::MeasureTiming;
-using V1_2::OutputShape;
-using V1_2::SymmPerChannelQuantParams;
-using V1_2::Timing;
-using V1_3::BufferDesc;
-using V1_3::BufferRole;
-using V1_3::Capabilities;
-using V1_3::ErrorStatus;
-using V1_3::IBuffer;
-using V1_3::IDevice;
-using V1_3::IExecutionCallback;
-using V1_3::IFencedExecutionCallback;
-using V1_3::IPreparedModel;
-using V1_3::IPreparedModelCallback;
-using V1_3::LoopTimeoutDurationNs;
-using V1_3::Model;
-using V1_3::Operand;
-using V1_3::OperandLifeTime;
-using V1_3::OperandType;
-using V1_3::OperandTypeRange;
-using V1_3::Operation;
-using V1_3::OperationType;
-using V1_3::OperationTypeRange;
-using V1_3::OptionalTimeoutDuration;
-using V1_3::OptionalTimePoint;
-using V1_3::Priority;
-using V1_3::Request;
-using V1_3::Subgraph;
-using ExtensionNameAndPrefix = V1_2::Model::ExtensionNameAndPrefix;
-using ExtensionTypeEncoding = V1_2::Model::ExtensionTypeEncoding;
-using OperandExtraParams = V1_2::Operand::ExtraParams;
-
-using CacheToken =
- hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
-using DeviceFactory = std::function<sp<V1_0::IDevice>(bool blocking)>;
-using ModelFactory = std::function<Model()>;
-
-inline constexpr Priority kDefaultPriority = Priority::MEDIUM;
-
-} // namespace android::nn::hal
+} // namespace android::nn
#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_INTERFACES_H
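The net effect of this header rewrite on call sites, shown side by side (illustrative only):

    // Before: unversioned names were HAL aliases pulled in via "using namespace hal":
    //     OperandType type = OperandType::TENSOR_FLOAT32;  // really V1_3::OperandType
    // After: unversioned names mean the canonical types, and HAL types carry an
    // explicit version qualifier:
    OperandType canonicalType = OperandType::TENSOR_FLOAT32;
    V1_3::OperandType halType = V1_3::OperandType::TENSOR_FLOAT32;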
diff --git a/common/include/MetaModel.h b/common/include/MetaModel.h
index 154a453..3cb87f3 100644
--- a/common/include/MetaModel.h
+++ b/common/include/MetaModel.h
@@ -17,9 +17,8 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_META_MODEL_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_META_MODEL_H
-#include "HalInterfaces.h"
-
#include <android-base/macros.h>
+
#include <functional>
#include <map>
#include <optional>
@@ -27,6 +26,10 @@
#include <utility>
#include <vector>
+#include "HalInterfaces.h"
+#include "Utils.h"
+#include "nnapi/Types.h"
+
namespace android::nn {
// The MetaModel class encapsulates a Model and provides machinery to create
@@ -67,14 +70,15 @@
template <class T_Model>
using ReturnedSlice = std::optional<std::pair<T_Model, Mapper>>;
- MetaModel(hal::Model model, bool strictSlicing)
- : mHidlModel(std::move(model)), mStrictSlicing(strictSlicing) {}
+ MetaModel(Model model, bool strictSlicing)
+ : mModel(std::move(model)), mStrictSlicing(strictSlicing) {}
- const hal::Model& getModel() const { return mHidlModel; }
+ const Model& getModel() const { return mModel; }
- ReturnedSlice<hal::V1_0::Model> getSliceV1_0() const { return getSlice(&mSliceV1_0); }
- ReturnedSlice<hal::V1_1::Model> getSliceV1_1() const { return getSlice(&mSliceV1_1); }
- ReturnedSlice<hal::V1_2::Model> getSliceV1_2() const { return getSlice(&mSliceV1_2); }
+ ReturnedSlice<V1_0::Model> getSliceV1_0() const { return getSlice(&mSliceV1_0); }
+ ReturnedSlice<V1_1::Model> getSliceV1_1() const { return getSlice(&mSliceV1_1); }
+ ReturnedSlice<V1_2::Model> getSliceV1_2() const { return getSlice(&mSliceV1_2); }
+ ReturnedSlice<V1_3::Model> getSliceV1_3() const { return getSlice(&mSliceV1_3); }
// Disallowing copy constructor and assignment operator is for efficiency,
// not for correctness. The default copy constructor and assignment
@@ -92,7 +96,7 @@
MetaModel& operator=(MetaModel&&) = default;
private:
- hal::Model mHidlModel;
+ Model mModel;
// mStrictSlicing controls validity checking. If the slicing algorithm
// produces an invalid model (because something has gone wrong with the
@@ -114,12 +118,20 @@
using Operation = typename decltype(mHidlModel.operations)::value_type;
using OperationType = decltype(Operation::type);
};
- mutable Slice<hal::V1_0::Model> mSliceV1_0;
- mutable Slice<hal::V1_1::Model> mSliceV1_1;
- mutable Slice<hal::V1_2::Model> mSliceV1_2;
+ template <>
+ struct Slice<V1_3::Model> { // Trivial slice.
+ SliceState mState = SliceState::UNINITIALIZED;
+ V1_3::Model mHidlModel;
+ };
+ mutable Slice<V1_0::Model> mSliceV1_0;
+ mutable Slice<V1_1::Model> mSliceV1_1;
+ mutable Slice<V1_2::Model> mSliceV1_2;
+ mutable Slice<V1_3::Model> mSliceV1_3;
template <class T_SlicedModel>
ReturnedSlice<T_SlicedModel> getSlice(Slice<T_SlicedModel>* slice) const;
+ template <>
+ ReturnedSlice<V1_3::Model> getSlice(Slice<V1_3::Model>* slice) const;
template <class T_SlicedModel>
Slice<T_SlicedModel> makeSlice() const;
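A slice pairs the downgraded model with a Mapper that translates an operation index in the slice back to the corresponding index in the original model. A hedged usage sketch (the surrounding code is hypothetical):

    MetaModel metaModel(std::move(model), /*strictSlicing=*/true);
    if (auto slice = metaModel.getSliceV1_2()) {
        const V1_2::Model& slicedModel = slice->first;
        const uint32_t originalIndex = slice->second(/*slicedOperationIndex=*/0);
    }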
diff --git a/common/include/OperationResolver.h b/common/include/OperationResolver.h
index ab70e4c..700513d 100644
--- a/common/include/OperationResolver.h
+++ b/common/include/OperationResolver.h
@@ -25,7 +25,7 @@
// Encapsulates an operation implementation.
struct OperationRegistration {
- hal::OperationType type;
+ OperationType type;
const char* name;
// Validates operand types, shapes, and any values known during graph creation.
@@ -47,7 +47,7 @@
bool allowZeroSizedInput = false;
} flags;
- OperationRegistration(hal::OperationType type, const char* name,
+ OperationRegistration(OperationType type, const char* name,
std::function<bool(const IOperationValidationContext*)> validate,
std::function<bool(IOperationExecutionContext*)> prepare,
std::function<bool(IOperationExecutionContext*)> execute, Flag flags)
@@ -62,7 +62,7 @@
// A registry of operation implementations.
class IOperationResolver {
public:
- virtual const OperationRegistration* findOperation(hal::OperationType operationType) const = 0;
+ virtual const OperationRegistration* findOperation(OperationType operationType) const = 0;
virtual ~IOperationResolver() {}
};
@@ -86,7 +86,7 @@
return &instance;
}
- const OperationRegistration* findOperation(hal::OperationType operationType) const override;
+ const OperationRegistration* findOperation(OperationType operationType) const override;
private:
BuiltinOperationResolver();
@@ -116,11 +116,11 @@
// .allowZeroSizedInput = true);
//
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
-#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \
- const OperationRegistration* register_##identifier() { \
- static OperationRegistration registration(hal::OperationType::identifier, operationName, \
- validate, prepare, execute, {__VA_ARGS__}); \
- return &registration; \
+#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \
+ const OperationRegistration* register_##identifier() { \
+ static OperationRegistration registration(OperationType::identifier, operationName, \
+ validate, prepare, execute, {__VA_ARGS__}); \
+ return &registration; \
}
#else
// This version ignores CPU execution logic (prepare and execute).
@@ -129,7 +129,7 @@
#define NN_REGISTER_OPERATION(identifier, operationName, validate, unused_prepare, unused_execute, \
...) \
const OperationRegistration* register_##identifier() { \
- static OperationRegistration registration(hal::OperationType::identifier, operationName, \
+ static OperationRegistration registration(OperationType::identifier, operationName, \
validate, nullptr, nullptr, {__VA_ARGS__}); \
return &registration; \
}
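After the rename, the identifier passed to the macro must be an enumerator of the canonical OperationType. A hypothetical registration following the pattern documented in the header (the relu:: names are assumptions):

    NN_REGISTER_OPERATION(RELU, "RELU", relu::validate, relu::prepare,
                          relu::execute, .allowZeroSizedInput = true);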
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h
index a8a07db..9b0a9bd 100644
--- a/common/include/OperationsUtils.h
+++ b/common/include/OperationsUtils.h
@@ -23,6 +23,7 @@
#include "HalInterfaces.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -45,11 +46,11 @@
// Stores operand type information. "Shape" is a historical name.
struct Shape {
- hal::OperandType type = hal::OperandType::FLOAT32;
+ OperandType type = OperandType::FLOAT32;
std::vector<uint32_t> dimensions;
float scale = 0.0f;
int32_t offset = 0;
- hal::OperandExtraParams extraParams;
+ Operand::ExtraParams extraParams;
};
// Provides information available during graph creation to validate an operation.
@@ -76,12 +77,12 @@
virtual HalVersion getHalVersion() const = 0;
virtual uint32_t getNumInputs() const = 0;
- virtual hal::OperandType getInputType(uint32_t index) const = 0;
+ virtual OperandType getInputType(uint32_t index) const = 0;
virtual Shape getInputShape(uint32_t index) const = 0;
- virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0;
+ virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0;
virtual uint32_t getNumOutputs() const = 0;
- virtual hal::OperandType getOutputType(uint32_t index) const = 0;
+ virtual OperandType getOutputType(uint32_t index) const = 0;
virtual Shape getOutputShape(uint32_t index) const = 0;
};
@@ -91,13 +92,13 @@
virtual ~IOperationExecutionContext() {}
virtual uint32_t getNumInputs() const = 0;
- virtual hal::OperandType getInputType(uint32_t index) const = 0;
+ virtual OperandType getInputType(uint32_t index) const = 0;
virtual Shape getInputShape(uint32_t index) const = 0;
virtual const void* getInputBuffer(uint32_t index) const = 0;
- virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0;
+ virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0;
virtual uint32_t getNumOutputs() const = 0;
- virtual hal::OperandType getOutputType(uint32_t index) const = 0;
+ virtual OperandType getOutputType(uint32_t index) const = 0;
virtual Shape getOutputShape(uint32_t index) const = 0;
virtual void* getOutputBuffer(uint32_t index) = 0;
@@ -125,11 +126,11 @@
// Verifies that the number and types of operation inputs are as expected.
bool validateInputTypes(const IOperationValidationContext* context,
- const std::vector<hal::OperandType>& expectedTypes);
+ const std::vector<OperandType>& expectedTypes);
// Verifies that the number and types of operation outputs are as expected.
bool validateOutputTypes(const IOperationValidationContext* context,
- const std::vector<hal::OperandType>& expectedTypes);
+ const std::vector<OperandType>& expectedTypes);
// Verifies that the HAL version specified in the context is greater or equal
// than the minimal supported HAL version.
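The canonical Operand::ExtraParams is a std::variant, so the generated HIDL accessors such as .channelQuant() become std::holds_alternative and std::get, as the operation changes later in this patch show. A minimal sketch (logChannelDim is hypothetical):

    void logChannelDim(const Shape& shape) {
        if (std::holds_alternative<Operand::SymmPerChannelQuantParams>(shape.extraParams)) {
            const auto& channelQuant =
                    std::get<Operand::SymmPerChannelQuantParams>(shape.extraParams);
            LOG(INFO) << "channelDim = " << channelQuant.channelDim;
        }
    }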
diff --git a/common/include/Utils.h b/common/include/Utils.h
index a291909..1d4c681 100644
--- a/common/include/Utils.h
+++ b/common/include/Utils.h
@@ -28,6 +28,8 @@
#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ValidateHal.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -135,24 +137,36 @@
#define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=)
#define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >)
+// Make a TimeoutDuration from a duration in nanoseconds. If the value exceeds
+// the max duration, return the maximum expressible duration.
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds);
+
// Type to represent a deadline time point across processes.
using Deadline = std::chrono::steady_clock::time_point;
// Make a Deadline from a duration. If the sum of the current time and the
// duration exceeds the max time, return a time point holding the maximum
// expressible time.
-Deadline makeDeadline(uint64_t duration);
+Deadline makeDeadline(TimeoutDuration duration);
+inline Deadline makeDeadline(uint64_t duration) {
+ return makeDeadline(makeTimeoutDuration(duration));
+}
// Convenience function. If the duration is provided, this function creates a
// Deadline using makeDeadline. If the duration is not provided, this function
// returns std::nullopt.
-std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration);
+inline std::optional<Deadline> makeDeadline(OptionalTimeoutDuration duration) {
+ return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
+ return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
// Make an optional Deadline from an OptionalTimePoint. If
// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
// time point holding the maximum Deadline. If the OptionalTimePoint is none,
// this function returns std::nullopt.
-std::optional<Deadline> makeDeadline(const hal::OptionalTimePoint& timePoint);
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint);
// Returns true if the deadline has passed. Returns false if either the deadline
// has not been exceeded or if the deadline is not present.
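A minimal sketch of the resulting makeDeadline overload set (values are hypothetical):

    TimeoutDuration oneMs = makeTimeoutDuration(1'000'000);  // 1 ms in ns
    Deadline a = makeDeadline(oneMs);                 // from TimeoutDuration
    Deadline b = makeDeadline(uint64_t{1'000'000});   // convenience overload
    std::optional<Deadline> c =
            makeDeadline(OptionalTimeoutDuration{});  // empty -> std::nullopt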
@@ -160,7 +174,7 @@
// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
// provided, this function returns none for OptionalTimePoint.
-hal::OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
+OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
// Ensure that every user of FalseyErrorStream is linked to the
// correct instance, using the correct LOG_TAG
@@ -193,14 +207,14 @@
template <>
struct VersionedType<HalVersion::V1_2> {
- using OperandPerformance = hal::V1_2::Capabilities::OperandPerformance;
- using OperandType = hal::V1_2::OperandType;
+ using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+ using OperandType = V1_2::OperandType;
};
template <>
struct VersionedType<HalVersion::V1_3> {
- using OperandPerformance = hal::V1_3::Capabilities::OperandPerformance;
- using OperandType = hal::V1_3::OperandType;
+ using OperandPerformance = V1_3::Capabilities::OperandPerformance;
+ using OperandType = V1_3::OperandType;
};
template <HalVersion version>
@@ -218,32 +232,32 @@
// separately using Capabilities::ifPerformance and
// Capabilities::whilePerformance.
template <HalVersion version>
-hal::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
- hal::PerformanceInfo perf);
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ V1_0::PerformanceInfo perf);
// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value. The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
-void update(hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>* operandPerformance,
- hal::V1_2::OperandType type, hal::PerformanceInfo perf);
-void update(hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>* operandPerformance,
- hal::V1_3::OperandType type, hal::PerformanceInfo perf);
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, V1_0::PerformanceInfo perf);
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, V1_0::PerformanceInfo perf);
// Look for a vector entry corresponding to the specified OperandType. If
// found, return the associated PerformanceInfo. If not, return a pessimistic
// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
-hal::PerformanceInfo lookup(
- const hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>& operandPerformance,
- hal::V1_2::OperandType type);
-hal::PerformanceInfo lookup(
- const hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>& operandPerformance,
- hal::V1_3::OperandType type);
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type);
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type);
// Returns true if an operand type is an extension type.
-bool isExtensionOperandType(hal::OperandType type);
+bool isExtensionOperandType(V1_3::OperandType type);
// Returns true if an operation type is an extension type.
-bool isExtensionOperationType(hal::OperationType type);
+bool isExtensionOperationType(V1_3::OperationType type);
// Returns the amount of space needed to store a value of the specified
// dimensions and type. For a tensor with unspecified rank or at least one
@@ -253,8 +267,9 @@
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
-uint32_t nonExtensionOperandSizeOfData(hal::OperandType type,
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
const std::vector<uint32_t>& dimensions);
+uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
@@ -264,7 +279,10 @@
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(const Operand&).
-inline uint32_t nonExtensionOperandSizeOfData(const hal::Operand& operand) {
+inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
+ return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
+}
+inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) {
return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
}
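The canonical overload makes quick size computations direct; a one-line sketch (values hypothetical): a 2x3 TENSOR_FLOAT32 tensor needs 2 * 3 * 4 = 24 bytes.

    const uint32_t bytes =
            nonExtensionOperandSizeOfData(OperandType::TENSOR_FLOAT32, {2, 3});  // 24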
@@ -283,7 +301,9 @@
// Aborts if the specified type is an extension type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
+ const std::vector<uint32_t>& dimensions);
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
const std::vector<uint32_t>& dimensions);
// Returns true if the amount of space needed to store a value of the specified
@@ -300,17 +320,21 @@
bool nonExtensionOperandTypeIsScalar(int type);
// Returns the name of the operation type in ASCII.
-std::string getOperationName(hal::OperationType opCode);
+std::string getOperationName(V1_3::OperationType opCode);
// Returns the name of the operand type in ASCII.
-std::string getOperandTypeName(hal::OperandType type);
+std::string getOperandTypeName(V1_3::OperandType type);
// Whether an operand of tensor type has unspecified dimensions.
//
// Undefined behavior if the operand type is a scalar type.
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
-bool tensorHasUnspecifiedDimensions(hal::OperandType type, const std::vector<uint32_t>& dimensions);
-bool tensorHasUnspecifiedDimensions(const hal::Operand& operand);
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
+bool tensorHasUnspecifiedDimensions(const Operand& operand);
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand);
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
// Returns the number of padding bytes needed to align data of the
@@ -323,10 +347,11 @@
uint32_t alignBytesNeeded(uint32_t index, size_t length);
// Does a detailed LOG(INFO) of the model
-void logModelToInfo(const hal::V1_0::Model& model);
-void logModelToInfo(const hal::V1_1::Model& model);
-void logModelToInfo(const hal::V1_2::Model& model);
-void logModelToInfo(const hal::V1_3::Model& model);
+void logModelToInfo(const V1_0::Model& model);
+void logModelToInfo(const V1_1::Model& model);
+void logModelToInfo(const V1_2::Model& model);
+void logModelToInfo(const V1_3::Model& model);
+void logModelToInfo(const Model& model);
inline std::string toString(uint32_t obj) {
return std::to_string(obj);
@@ -344,22 +369,22 @@
template <typename A, typename B>
std::string toString(const std::pair<A, B>& pair) {
std::ostringstream oss;
- oss << "(" << toString(pair.first) << ", " << toString(pair.second) << ")";
+ oss << "(" << pair.first << ", " << pair.second << ")";
return oss.str();
}
-inline std::string toString(HalVersion halVersion) {
+inline std::ostream& operator<<(std::ostream& os, const HalVersion& halVersion) {
switch (halVersion) {
case HalVersion::UNKNOWN:
- return "UNKNOWN HAL version";
+ return os << "UNKNOWN HAL version";
case HalVersion::V1_0:
- return "HAL version 1.0";
+ return os << "HAL version 1.0";
case HalVersion::V1_1:
- return "HAL version 1.1";
+ return os << "HAL version 1.1";
case HalVersion::V1_2:
- return "HAL version 1.2";
+ return os << "HAL version 1.2";
case HalVersion::V1_3:
- return "HAL version 1.3";
+ return os << "HAL version 1.3";
}
}
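With HalVersion streamable here, and the canonical enums streamable via nnapi/TypeUtils.h, log statements drop the toString and getOperationName helpers. A minimal sketch (logUnsupported is hypothetical):

    void logUnsupported(OperationType opType, OperandType inputType) {
        LOG(ERROR) << "Operation " << opType
                   << " does not support input type " << inputType;
    }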
@@ -368,7 +393,7 @@
}
bool validateOperandSymmPerChannelQuantParams(
- const hal::Operand& halOperand,
+ const V1_3::Operand& halOperand,
const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
// Validates an operand type.
@@ -376,25 +401,24 @@
// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
//
// If allowPartial is true, the dimensions may be underspecified.
-int validateOperandType(
- const ANeuralNetworksOperandType& type,
- const hal::Extension::OperandTypeInformation* const extensionOperandTypeInfo,
- const char* tag, bool allowPartial);
+int validateOperandType(const ANeuralNetworksOperandType& type,
+ const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
+ const char* tag, bool allowPartial);
int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
const char* tag);
// A set of functions to help validate models containing IF or WHILE operations.
struct SubgraphValidationHelper {
// Checks if a given operand is a SUBGRAPH operand with a valid offset.
- std::function<bool(const hal::Operand&)> isValidSubgraphReference;
+ std::function<bool(const Operand&)> isValidSubgraphReference;
// Gets the input count of a subgraph referenced by a given operand.
- std::function<uint32_t(const hal::Operand&)> getSubgraphInputCount;
+ std::function<uint32_t(const Operand&)> getSubgraphInputCount;
// Gets the output count of a subgraph referenced by a given operand.
- std::function<uint32_t(const hal::Operand&)> getSubgraphOutputCount;
+ std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
// Gets the specified input operand of a subgraph referenced by a given operand.
- std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphInputOperand;
+ std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
// Gets the specified output operand of a subgraph referenced by a given operand.
- std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphOutputOperand;
+ std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
// Whether control flow operations with inner or outer input or output
// operands of unknown size are allowed.
bool allowControlFlowOperationWithOperandOfUnknownSize;
@@ -405,7 +429,7 @@
// The last argument is only used for validating IF and WHILE operations.
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
const uint32_t* inputIndexes, uint32_t outputCount,
- const uint32_t* outputIndexes, const std::vector<hal::Operand>& operands,
+ const uint32_t* outputIndexes, const std::vector<Operand>& operands,
HalVersion halVersion, const SubgraphValidationHelper& helper);
inline size_t getSizeFromInts(int lower, int higher) {
@@ -414,40 +438,41 @@
// Convert ANEURALNETWORKS_* result code to ErrorStatus.
// Not guaranteed to be a 1-to-1 mapping.
-hal::ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode);
// Convert ErrorStatus to ANEURALNETWORKS_* result code.
// Not guaranteed to be a 1-to-1 mapping.
-int convertErrorStatusToResultCode(hal::ErrorStatus status);
+int convertErrorStatusToResultCode(ErrorStatus status);
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
// Convert execution results to runtime format. Additionally checks that the
// returned results abide by the HAL specification, and logs an error if the
// result violates the specification.
-std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> getExecutionResult(
- hal::ErrorStatus status, std::vector<hal::OutputShape> outputShapes, hal::Timing timing);
-
-// Combine two tensor dimensions, both may have unspecified dimensions or rank.
-std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
- const std::vector<uint32_t>& rhs);
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing);
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
// Versioning
-bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_0(const hal::V1_3::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_3::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_3::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_3::Capabilities& capabilities);
// If noncompliantOperations != nullptr, then
// precondition: noncompliantOperations->empty()
@@ -455,114 +480,127 @@
// operations; if the compliance check fails for some reason
// other than a noncompliant operation,
// *noncompliantOperations consists of the indices of all operations
-bool compliantWithV1_0(const hal::V1_0::Model& model);
-bool compliantWithV1_0(const hal::V1_1::Model& model);
-bool compliantWithV1_0(const hal::V1_2::Model& model,
+bool compliantWithV1_0(const V1_0::Model& model);
+bool compliantWithV1_0(const V1_1::Model& model);
+bool compliantWithV1_0(const V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_0(const hal::V1_3::Model& model,
+bool compliantWithV1_0(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const hal::V1_0::Model& model);
-bool compliantWithV1_1(const hal::V1_1::Model& model);
-bool compliantWithV1_1(const hal::V1_2::Model& model,
+bool compliantWithV1_1(const V1_0::Model& model);
+bool compliantWithV1_1(const V1_1::Model& model);
+bool compliantWithV1_1(const V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const hal::V1_3::Model& model,
+bool compliantWithV1_1(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_2(const hal::V1_0::Model& model);
-bool compliantWithV1_2(const hal::V1_1::Model& model);
-bool compliantWithV1_2(const hal::V1_2::Model& model,
+bool compliantWithV1_2(const V1_0::Model& model);
+bool compliantWithV1_2(const V1_1::Model& model);
+bool compliantWithV1_2(const V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_2(const hal::V1_3::Model& model,
+bool compliantWithV1_2(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-hal::V1_0::ErrorStatus convertToV1_0(hal::V1_0::ErrorStatus status);
-hal::V1_0::ErrorStatus convertToV1_0(hal::V1_3::ErrorStatus status);
-hal::V1_3::ErrorStatus convertToV1_3(hal::V1_0::ErrorStatus status);
-hal::V1_3::ErrorStatus convertToV1_3(hal::V1_3::ErrorStatus status);
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status);
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_3::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_3::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_1::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_3::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_0::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_1::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_2::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_3::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities);
-hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model);
-hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model);
-hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model);
-hal::V1_0::Model convertToV1_0(const hal::V1_3::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_3::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_3::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_0::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_1::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_2::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_3::Model& model);
+V1_0::Model convertToV1_0(const V1_0::Model& model);
+V1_0::Model convertToV1_0(const V1_1::Model& model);
+V1_0::Model convertToV1_0(const V1_2::Model& model);
+V1_0::Model convertToV1_0(const V1_3::Model& model);
+V1_1::Model convertToV1_1(const V1_0::Model& model);
+V1_1::Model convertToV1_1(const V1_1::Model& model);
+V1_1::Model convertToV1_1(const V1_2::Model& model);
+V1_1::Model convertToV1_1(const V1_3::Model& model);
+V1_2::Model convertToV1_2(const V1_0::Model& model);
+V1_2::Model convertToV1_2(const V1_1::Model& model);
+V1_2::Model convertToV1_2(const V1_2::Model& model);
+V1_2::Model convertToV1_2(const V1_3::Model& model);
+V1_3::Model convertToV1_3(const V1_0::Model& model);
+V1_3::Model convertToV1_3(const V1_1::Model& model);
+V1_3::Model convertToV1_3(const V1_2::Model& model);
+V1_3::Model convertToV1_3(const V1_3::Model& model);
-hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_3::OperationType type);
-hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_3::OperationType type);
-hal::V1_2::OperationType uncheckedConvertToV1_2(hal::V1_3::OperationType type);
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type);
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type);
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type);
-hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand);
-hal::V1_0::Operand convertToV1_0(const hal::V1_3::Operand& operand);
-hal::V1_2::Operand convertToV1_2(const hal::V1_0::Operand& operand);
-hal::V1_2::Operand convertToV1_2(const hal::V1_3::Operand& operand);
-hal::V1_3::Operand convertToV1_3(const hal::V1_0::Operand& operand);
-hal::V1_3::Operand convertToV1_3(const hal::V1_2::Operand& operand);
-hal::V1_3::Operand convertToV1_3(const hal::V1_3::Operand& operand);
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand);
-hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_0::Operand>& operands);
-hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_2::Operand>& operands);
-hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_3::Operand>& operands);
-hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands);
-hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands);
-hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_3::Operand>& operands);
-hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_0::Operand>& operands);
-hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_2::Operand>& operands);
-hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_3::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands);
-bool compliantWithV1_0(const hal::V1_0::Request& request);
-bool compliantWithV1_0(const hal::V1_3::Request& request);
-bool compliantWithV1_2(const hal::V1_3::Request& request);
+bool compliantWithV1_0(const V1_0::Request& request);
+bool compliantWithV1_0(const V1_3::Request& request);
+bool compliantWithV1_2(const V1_3::Request& request);
-hal::V1_0::Request convertToV1_0(const hal::V1_0::Request& request);
-hal::V1_0::Request convertToV1_0(const hal::V1_3::Request& request);
-hal::V1_0::Request convertToV1_2(const hal::V1_3::Request& request);
-hal::V1_3::Request convertToV1_3(const hal::V1_0::Request& request);
-hal::V1_3::Request convertToV1_3(const hal::V1_3::Request& request);
+V1_0::Request convertToV1_0(const V1_0::Request& request);
+V1_0::Request convertToV1_0(const V1_3::Request& request);
+V1_0::Request convertToV1_2(const V1_3::Request& request);
+V1_3::Request convertToV1_3(const V1_0::Request& request);
+V1_3::Request convertToV1_3(const V1_3::Request& request);
-bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime);
-bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime);
-bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime);
-bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime);
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime);
-hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_0::OperandLifeTime lifetime);
-hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_3::OperandLifeTime lifetime);
-hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_0::OperandLifeTime lifetime);
-hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_3::OperandLifeTime lifetime);
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime);
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime);
-constexpr hal::Priority convertToHalPriority(int32_t priority) {
+constexpr V1_3::Priority convertToHalPriority(int32_t priority) {
switch (priority) {
case ANEURALNETWORKS_PRIORITY_LOW:
- return hal::Priority::LOW;
+ return V1_3::Priority::LOW;
case ANEURALNETWORKS_PRIORITY_MEDIUM:
- return hal::Priority::MEDIUM;
+ return V1_3::Priority::MEDIUM;
case ANEURALNETWORKS_PRIORITY_HIGH:
- return hal::Priority::HIGH;
+ return V1_3::Priority::HIGH;
+ }
+ LOG(FATAL) << "unrecognized priority: " << priority;
+ return {};
+}
+
+constexpr Priority convertToCanonicalPriority(int32_t priority) {
+ switch (priority) {
+ case ANEURALNETWORKS_PRIORITY_LOW:
+ return Priority::LOW;
+ case ANEURALNETWORKS_PRIORITY_MEDIUM:
+ return Priority::MEDIUM;
+ case ANEURALNETWORKS_PRIORITY_HIGH:
+ return Priority::HIGH;
}
LOG(FATAL) << "unrecognized priority: " << priority;
return {};
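The two functions give HAL-facing and canonical call sites the same mapping; a short sketch:

    const V1_3::Priority halPriority = convertToHalPriority(ANEURALNETWORKS_PRIORITY_HIGH);
    const Priority canonicalPriority = convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_HIGH);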
@@ -583,6 +621,76 @@
uint32_t getProp(const char* str, uint32_t defaultValue = 0);
#endif // NN_DEBUGGABLE
+// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h.
+Capabilities::OperandPerformance uncheckedConvert(
+ const V1_3::Capabilities::OperandPerformance& operandPerformance);
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo);
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities);
+DataLocation uncheckedConvert(const V1_0::DataLocation& location);
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status);
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status);
+Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&);
+Extension uncheckedConvert(const V1_2::Extension& extension);
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params);
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure);
+Memory uncheckedConvert(const hardware::hidl_memory& memory);
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&);
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph);
+Model uncheckedConvert(const V1_3::Model& model);
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params);
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params);
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime);
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params);
+OperandType uncheckedConvert(V1_3::OperandType operandType);
+Operand uncheckedConvert(const V1_3::Operand& operand);
+OperationType uncheckedConvert(V1_3::OperationType operationType);
+Operation uncheckedConvert(const V1_3::Operation& operation);
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration);
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape);
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument);
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool);
+Request uncheckedConvert(const V1_3::Request& request);
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions);
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories);
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs);
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands);
+std::vector<OutputShape> uncheckedConvert(
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes);
+std::vector<Request::MemoryPool> uncheckedConvert(
+ const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools);
+Timing uncheckedConvert(const V1_2::Timing& timing);
+
+// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h.
+hardware::hidl_memory convertToV1_0(const Memory& memory);
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories);
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues);
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes);
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles);
+V1_0::DataLocation convertToV1_0(const DataLocation& location);
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status);
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument);
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference);
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure);
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&);
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params);
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape);
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params);
+V1_2::Timing convertToV1_2(const Timing& timing);
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole);
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status);
+V1_3::Model convertToV1_3(const Model& model);
+V1_3::Operand convertToV1_3(const Operand& operand);
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime);
+V1_3::OperandType convertToV1_3(OperandType operandType);
+V1_3::Operation convertToV1_3(const Operation& operation);
+V1_3::OperationType convertToV1_3(OperationType operationType);
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration);
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint);
+V1_3::Priority convertToV1_3(Priority priority);
+V1_3::Request convertToV1_3(const Request& request);
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool);
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
+
} // namespace nn
} // namespace android
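Taken together, the helpers above let the runtime canonicalize at the HAL boundary and convert back only when talking to a driver. A hedged sketch (canonicalizeModel is hypothetical):

    Model canonicalizeModel(const V1_3::Model& halModel) {
        Model canonical = uncheckedConvert(halModel);      // HAL -> canonical
        V1_3::Model forDriver = convertToV1_3(canonical);  // canonical -> HAL
        (void)forDriver;  // would be passed to a V1_3 driver
        return canonical;
    }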
diff --git a/common/include/ValidateHal.h b/common/include/ValidateHal.h
index 32d7662..c501fc0 100644
--- a/common/include/ValidateHal.h
+++ b/common/include/ValidateHal.h
@@ -35,7 +35,7 @@
};
enum class IOType { INPUT, OUTPUT };
-using PreparedModelRole = std::tuple<const hal::IPreparedModel*, IOType, uint32_t>;
+using PreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>;
// 1.3 HAL does not support control flow operations with operands of unknown size.
// See http://b/132458982#comment63.
@@ -62,35 +62,35 @@
bool allowUnspecifiedOutput = true);
// Verifies that the execution preference is valid.
-bool validateExecutionPreference(hal::ExecutionPreference preference);
+bool validateExecutionPreference(V1_1::ExecutionPreference preference);
// Verifies that the priority is valid.
-bool validatePriority(hal::Priority priority);
+bool validatePriority(V1_3::Priority priority);
-bool validOperationType(hal::V1_0::OperationType operation);
-bool validOperationType(hal::V1_1::OperationType operation);
-bool validOperationType(hal::V1_2::OperationType operation);
+bool validOperationType(V1_0::OperationType operation);
+bool validOperationType(V1_1::OperationType operation);
+bool validOperationType(V1_2::OperationType operation);
-bool validOperandType(hal::V1_0::OperandType operand);
-bool validOperandType(hal::V1_2::OperandType operand);
-bool validOperandType(hal::V1_3::OperandType operand);
+bool validOperandType(V1_0::OperandType operand);
+bool validOperandType(V1_2::OperandType operand);
+bool validOperandType(V1_3::OperandType operand);
// Verifies that the memory pool is valid in the specified HAL version.
-bool validatePool(const hal::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
-bool validatePool(const hal::V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST);
+bool validatePool(const hardware::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
+bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST);
// Verifies that the input arguments to IDevice::allocate are valid.
// Optionally, this function can return a flattened prepared model roles and a combined operand.
// Pass nullptr if either value is not needed.
// IMPORTANT: This function cannot validate dimensions and extraParams with extension operand type.
// Each driver should do their own validation of extension type dimensions and extraParams.
-bool validateMemoryDesc(
- const hal::V1_3::BufferDesc& desc,
- const hal::hidl_vec<sp<hal::V1_3::IPreparedModel>>& preparedModels,
- const hal::hidl_vec<hal::V1_3::BufferRole>& inputRoles,
- const hal::hidl_vec<hal::V1_3::BufferRole>& outputRoles,
- std::function<const hal::V1_3::Model*(const sp<hal::V1_3::IPreparedModel>&)> getModel,
- std::set<PreparedModelRole>* preparedModelRoles, hal::V1_3::Operand* combinedOperand);
+bool validateMemoryDesc(const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
+ std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
+ std::set<PreparedModelRole>* preparedModelRoles,
+ V1_3::Operand* combinedOperand);
} // namespace nn
} // namespace android
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index ff5a55d..c0a1934 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -28,7 +28,6 @@
#include "ActivationFunctor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -36,8 +35,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
namespace activation {
constexpr uint32_t kNumInputs = 1;
@@ -373,7 +370,7 @@
} else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
diff --git a/common/operations/ArgMinMax.cpp b/common/operations/ArgMinMax.cpp
index f53ba47..2ee413c 100644
--- a/common/operations/ArgMinMax.cpp
+++ b/common/operations/ArgMinMax.cpp
@@ -19,7 +19,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -27,8 +26,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
template <typename In, typename Out>
static void argMinMaxImpl(const In* inputData, const Shape& inputShape, int32_t axis, bool isArgMin,
Out* outputData, const Shape& outputShape) {
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp
index 12ac43f..6cf095b 100644
--- a/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -23,7 +23,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -32,8 +31,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/BidirectionalSequenceLSTM.h b/common/operations/BidirectionalSequenceLSTM.h
index 184b65d..7077d3b 100644
--- a/common/operations/BidirectionalSequenceLSTM.h
+++ b/common/operations/BidirectionalSequenceLSTM.h
@@ -34,12 +34,11 @@
class BidirectionalSequenceLSTM {
public:
- BidirectionalSequenceLSTM(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ BidirectionalSequenceLSTM(const Operation& operation, RunTimeOperandInfo* operands);
- bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
- Shape* fwOutputShape, Shape* bwOutputShape, Shape* fwOutputActivationState,
- Shape* fwOutputCellState, Shape* bwOutputActivationState,
- Shape* bwOutputCellState);
+ bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* fwOutputShape,
+ Shape* bwOutputShape, Shape* fwOutputActivationState, Shape* fwOutputCellState,
+ Shape* bwOutputActivationState, Shape* bwOutputCellState);
bool Eval();
// Input Tensors of size {max_time, n_batch, n_input}
diff --git a/common/operations/BidirectionalSequenceRNN.cpp b/common/operations/BidirectionalSequenceRNN.cpp
index 98917c0..adacea0 100644
--- a/common/operations/BidirectionalSequenceRNN.cpp
+++ b/common/operations/BidirectionalSequenceRNN.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
@@ -61,8 +60,6 @@
namespace {
-using namespace hal;
-
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
@@ -327,7 +324,7 @@
OperandType inputType = context->getInputType(kInputTensor);
if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {
LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: "
- << toString(inputType);
+ << inputType;
return false;
}
NN_RET_CHECK(validateInputTypes(
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp
index 17094af..67bb914 100644
--- a/common/operations/Broadcast.cpp
+++ b/common/operations/Broadcast.cpp
@@ -29,16 +29,14 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace broadcast {
constexpr uint32_t kNumInputs = 3;
@@ -53,16 +51,16 @@
#define ANDROID_NN_MACRO_DISPATCH(macro) \
switch (activation) { \
- case (int32_t)FusedActivationFunc::NONE: \
+ case static_cast<int32_t>(FusedActivationFunc::NONE): \
macro(kNone); \
break; \
- case (int32_t)FusedActivationFunc::RELU: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU): \
macro(kRelu); \
break; \
- case (int32_t)FusedActivationFunc::RELU1: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU1): \
macro(kRelu1); \
break; \
- case (int32_t)FusedActivationFunc::RELU6: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU6): \
macro(kRelu6); \
break; \
default: \
@@ -464,7 +462,7 @@
inputType == OperandType::TENSOR_INT32) {
NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_3, opIntroducedAt)));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
const Shape& input1 = context->getInputShape(kInputTensor1);
const Shape& input2 = context->getInputShape(kInputTensor2);
diff --git a/common/operations/Cast.cpp b/common/operations/Cast.cpp
index 77e35af..aef3baf 100644
--- a/common/operations/Cast.cpp
+++ b/common/operations/Cast.cpp
@@ -20,7 +20,6 @@
#include <algorithm>
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -30,8 +29,6 @@
namespace {
-using namespace hal;
-
template <typename FromT, typename ToT>
void copyCast(const FromT* in, ToT* out, int numElements) {
std::transform(in, in + numElements, out, [](FromT a) -> ToT {
diff --git a/common/operations/ChannelShuffle.cpp b/common/operations/ChannelShuffle.cpp
index 7abf224..779a8d8 100644
--- a/common/operations/ChannelShuffle.cpp
+++ b/common/operations/ChannelShuffle.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -25,8 +24,6 @@
namespace nn {
namespace channel_shuffle {
-using namespace hal;
-
constexpr char kOperationName[] = "CHANNEL_SHUFFLE";
constexpr uint32_t kNumInputs = 3;
diff --git a/common/operations/Comparisons.cpp b/common/operations/Comparisons.cpp
index a8f8622..50ed806 100644
--- a/common/operations/Comparisons.cpp
+++ b/common/operations/Comparisons.cpp
@@ -19,7 +19,6 @@
#include <functional>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename DataType, typename ComparisonType>
bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData,
const Shape& aShape, const DataType* bData, const Shape& bShape, bool8* outputData,
@@ -135,7 +132,7 @@
inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for comparison op: " << toString(inputType);
+ << "Unsupported input operand type for comparison op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));
NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8}));
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp
index 08c9c61..6de5bad 100644
--- a/common/operations/Concatenation.cpp
+++ b/common/operations/Concatenation.cpp
@@ -27,7 +27,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool concatenation(const std::vector<const T*>& inputDataPtrs,
const std::vector<Shape>& inputShapes, int32_t axis, T* outputData,
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index f34e908..5b7d8d0 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -26,7 +26,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "OperationsUtils.h"
@@ -49,8 +48,6 @@
namespace {
-using namespace hal;
-
// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
@@ -566,7 +563,9 @@
OperandType::INT32};
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
0)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -727,7 +726,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
@@ -758,7 +759,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 32e8b55..47bf010 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -42,8 +42,6 @@
namespace {
-using namespace hal;
-
struct DepthwiseConv2dParam {
int32_t padding_left, padding_right;
int32_t padding_top, padding_bottom;
@@ -443,7 +441,9 @@
filterType == inputType)
<< "Unsupported filter tensor type for operation " << kOperationName;
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
3)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -607,7 +607,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
@@ -639,7 +641,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
diff --git a/common/operations/Dequantize.cpp b/common/operations/Dequantize.cpp
index 2fb2d5c..7b81143 100644
--- a/common/operations/Dequantize.cpp
+++ b/common/operations/Dequantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -33,8 +32,6 @@
namespace {
-using namespace hal;
-
template <typename InputType, typename OutputType>
bool compute(const InputType* inputData, const Shape& inputShape, OutputType* outputData) {
const int numElements = getNumberOfElements(inputShape);
@@ -52,7 +49,8 @@
// First we calculate a stride which is the number of elements we need to
// skip to change an index along a dimension with different quantization
// scales.
- const int channelDim = inputShape.extraParams.channelQuant().channelDim;
+ const int channelDim =
+ std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams).channelDim;
int stride = 1;
for (int i = getNumberOfDimensions(inputShape) - 1; i > channelDim; --i) {
stride *= getSizeOfDimension(inputShape, i);
@@ -67,7 +65,8 @@
// size of the dimension (so that we don't have an overflow if the
// channelDim is not 0).
const int scaleIndex = (i / stride) % getSizeOfDimension(inputShape, channelDim);
- const float scale = inputShape.extraParams.channelQuant().scales[scaleIndex];
+ const float scale = std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams)
+ .scales[scaleIndex];
const int32_t value = inputData[i];
outputData[i] = static_cast<OutputType>(scale * (value - zeroPoint));
}
@@ -97,10 +96,10 @@
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
inputType == OperandType::TENSOR_QUANT8_SYMM ||
inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)
- << "Unsupported input operand type for DEQUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for DEQUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||
outputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported output operand type for DEQUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for DEQUANTIZE op: " << outputType;
return validateHalVersion(context, HalVersion::V1_2);
}
@@ -155,7 +154,7 @@
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for dequantize op. (input type: "
- << toString(inputType) << " output type: " << toString(outputType) << ")";
+ << inputType << " output type: " << outputType << ")";
}
} // namespace dequantize
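The stride arithmetic in the per-channel branch above picks one scale per slice of the channel dimension. An illustrative standalone sketch of that indexing (not the runtime kernel; zeroPoint is 0 for TENSOR_QUANT8_SYMM_PER_CHANNEL per the NNAPI spec):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    float dequantizePerChannel(const std::vector<int8_t>& input,
                               const std::vector<float>& scales,
                               const std::vector<uint32_t>& dims,
                               size_t channelDim, size_t i) {
        // stride = number of elements to skip before the index along
        // channelDim changes, i.e. the product of the trailing dimensions.
        size_t stride = 1;
        for (size_t d = dims.size() - 1; d > channelDim; --d) stride *= dims[d];
        const size_t scaleIndex = (i / stride) % dims[channelDim];
        return scales[scaleIndex] * static_cast<float>(input[i]);
    }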
diff --git a/common/operations/Elementwise.cpp b/common/operations/Elementwise.cpp
index 82a2687..3ddae90 100644
--- a/common/operations/Elementwise.cpp
+++ b/common/operations/Elementwise.cpp
@@ -18,7 +18,6 @@
#include <cmath>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -35,8 +34,6 @@
namespace {
-using namespace hal;
-
template <typename IntermediateType, typename T>
inline bool compute(IntermediateType func(IntermediateType), const T* input, const Shape& shape,
T* output) {
diff --git a/common/operations/Elu.cpp b/common/operations/Elu.cpp
index 07304e7..dfb221c 100644
--- a/common/operations/Elu.cpp
+++ b/common/operations/Elu.cpp
@@ -20,7 +20,6 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -30,8 +29,6 @@
namespace nn {
namespace elu {
-using namespace hal;
-
constexpr uint32_t kNumInputs = 2;
constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kAlphaScalar = 1;
diff --git a/common/operations/EmbeddingLookup.cpp b/common/operations/EmbeddingLookup.cpp
index 12e4a65..5ff26e8 100644
--- a/common/operations/EmbeddingLookup.cpp
+++ b/common/operations/EmbeddingLookup.cpp
@@ -19,7 +19,6 @@
#include "EmbeddingLookup.h"
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -27,8 +26,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
EmbeddingLookup::EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands) {
value_ = GetInput(operation, operands, kValueTensor);
lookup_ = GetInput(operation, operands, kLookupTensor);
diff --git a/common/operations/EmbeddingLookup.h b/common/operations/EmbeddingLookup.h
index 9a82dda..0388b35 100644
--- a/common/operations/EmbeddingLookup.h
+++ b/common/operations/EmbeddingLookup.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -28,7 +28,7 @@
class EmbeddingLookup {
public:
- EmbeddingLookup(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands);
bool Eval();
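The same two-line header migration repeats across these operation classes: HalInterfaces.h gives way to nnapi/Types.h, and the hal:: qualifier drops because the canonical Operation lives directly in android::nn. A sketch of a migrated header under that pattern; ExampleOp is a hypothetical class:

    #include <vector>

    #include "nnapi/Types.h"

    namespace android {
    namespace nn {

    struct RunTimeOperandInfo;  // defined in CpuExecutor.h

    class ExampleOp {
       public:
        ExampleOp(const Operation& operation, RunTimeOperandInfo* operands);
        bool Eval();
    };

    }  // namespace nn
    }  // namespace android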
diff --git a/common/operations/Fill.cpp b/common/operations/Fill.cpp
index a6b3906..a233627 100644
--- a/common/operations/Fill.cpp
+++ b/common/operations/Fill.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
namespace android {
@@ -33,8 +32,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool executeTyped(IOperationExecutionContext* context) {
T* output = context->getOutputBuffer<T>(kOutputTensor);
@@ -58,7 +55,7 @@
*valueType = OperandType::INT32;
return true;
default:
- NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << toString(outputType);
+ NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << outputType;
}
}
@@ -73,7 +70,7 @@
NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||
outputType == OperandType::TENSOR_FLOAT32 ||
outputType == OperandType::TENSOR_INT32)
- << "Unsupported output type for fill op: " << toString(outputType);
+ << "Unsupported output type for fill op: " << outputType;
NN_RET_CHECK(validateOutputTypes(context, {outputType}));
OperandType valueType;
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index 9bdd0ba..9fcc072 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -45,8 +44,6 @@
namespace {
-using namespace hal;
-
// executionMutex is used to protect concurrent access to non-threadsafe resources
// like gemmlowp::GemmContext.
// std::mutex is safe for pthreads on Android.
diff --git a/common/operations/Gather.cpp b/common/operations/Gather.cpp
index d496d6a..e73a22e 100644
--- a/common/operations/Gather.cpp
+++ b/common/operations/Gather.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis,
const int32_t* indicesData, const Shape& indicesShape, T* outputData) {
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp
index 4e3aa3f..2ef733e 100644
--- a/common/operations/GenerateProposals.cpp
+++ b/common/operations/GenerateProposals.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -35,8 +34,6 @@
namespace {
-using namespace hal;
-
struct BoxEncodingCorner {
float x1, y1, x2, y2;
};
diff --git a/common/operations/HashtableLookup.cpp b/common/operations/HashtableLookup.cpp
index 287c866..cfb9d98 100644
--- a/common/operations/HashtableLookup.cpp
+++ b/common/operations/HashtableLookup.cpp
@@ -19,7 +19,6 @@
#include "HashtableLookup.h"
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -29,8 +28,6 @@
namespace {
-using namespace hal;
-
int greater(const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
}
diff --git a/common/operations/HashtableLookup.h b/common/operations/HashtableLookup.h
index c0921e0..1ae554f 100644
--- a/common/operations/HashtableLookup.h
+++ b/common/operations/HashtableLookup.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -28,7 +28,7 @@
class HashtableLookup {
public:
- HashtableLookup(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ HashtableLookup(const Operation& operation, RunTimeOperandInfo* operands);
bool Eval();
diff --git a/common/operations/HeatmapMaxKeypoint.cpp b/common/operations/HeatmapMaxKeypoint.cpp
index 3608ca5..a07e142 100644
--- a/common/operations/HeatmapMaxKeypoint.cpp
+++ b/common/operations/HeatmapMaxKeypoint.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -44,8 +43,6 @@
namespace {
-using namespace hal;
-
// This function uses Taylor expansion up to the quadratic term to approximate the bicubic
// upscaling result.
// 2nd order Taylor expansion: D(x) = D - b'x + 1/2 * x'Ax
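Restating the comment above as a worked equation (standard second-order refinement, stated here for clarity rather than taken from the source):

    \[
      D(x) \approx D(0) - b^\top x + \tfrac{1}{2}\, x^\top A x,
      \qquad \nabla D(x^*) = -b + A x^* = 0
      \;\Rightarrow\; x^* = A^{-1} b,
    \]

so the sub-pixel keypoint offset is obtained by solving the linear system given by the heatmap's local gradient b and Hessian A.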
diff --git a/common/operations/InstanceNormalization.cpp b/common/operations/InstanceNormalization.cpp
index 75b907b..0ce21d0 100644
--- a/common/operations/InstanceNormalization.cpp
+++ b/common/operations/InstanceNormalization.cpp
@@ -20,7 +20,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool instanceNormNhwc(const T* inputData, const Shape& inputShape, T gamma, T beta,
T epsilon, T* outputData, const Shape& outputShape) {
diff --git a/common/operations/L2Normalization.cpp b/common/operations/L2Normalization.cpp
index 1f0c9d0..f86ab80 100644
--- a/common/operations/L2Normalization.cpp
+++ b/common/operations/L2Normalization.cpp
@@ -23,7 +23,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis,
float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("l2normFloat32");
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index bdb106e..14d7a79 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -18,19 +18,18 @@
#include "LSHProjection.h"
+#include <utils/hash/farmhash.h>
+
+#include <memory>
+
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
#include "Utils.h"
-
-#include <utils/hash/farmhash.h>
-#include <memory>
+#include "nnapi/Types.h"
namespace android {
namespace nn {
-using namespace hal;
-
LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* operands) {
input_ = GetInput(operation, operands, kInputTensor);
weight_ = GetInput(operation, operands, kWeightTensor);
@@ -112,7 +111,7 @@
int64_t hash_signature = farmhash::Fingerprint64(key.get(), key_bytes);
double running_value = static_cast<double>(hash_signature);
input_ptr += input_item_bytes;
- if (weight->lifetime == OperandLifeTime::NO_VALUE) {
+ if (weight->lifetime == Operand::LifeTime::NO_VALUE) {
score += running_value;
} else {
score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
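The lifetime comparison above is another recurring rename: the free HIDL enum OperandLifeTime becomes the nested canonical Operand::LifeTime. A minimal sketch; isOmitted is an illustrative helper:

    #include "nnapi/Types.h"

    using android::nn::Operand;

    bool isOmitted(const Operand& operand) {
        // Before: operand.lifetime == OperandLifeTime::NO_VALUE
        return operand.lifetime == Operand::LifeTime::NO_VALUE;
    }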
diff --git a/common/operations/LSHProjection.h b/common/operations/LSHProjection.h
index 520f58a..3a953a0 100644
--- a/common/operations/LSHProjection.h
+++ b/common/operations/LSHProjection.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -36,9 +36,9 @@
class LSHProjection {
public:
- LSHProjection(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ LSHProjection(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape);
template <typename T>
bool Eval();
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp
index 3051cfd..e64d0c4 100644
--- a/common/operations/LSTM.cpp
+++ b/common/operations/LSTM.cpp
@@ -22,18 +22,16 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include "Tracing.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
@@ -113,7 +111,7 @@
} else {
// For LSTM from HAL v1.0 assign operands with no values
static RunTimeOperandInfo no_value;
- no_value.lifetime = OperandLifeTime::NO_VALUE;
+ no_value.lifetime = Operand::LifeTime::NO_VALUE;
input_layer_norm_weights_ = &no_value;
forget_layer_norm_weights_ = &no_value;
@@ -221,8 +219,8 @@
// omitted ones can be omitted in case CIFG LSTM is used.
params->use_layer_norm = !IsNullInput(output_layer_norm_weights);
- params->use_projection_weight = (projection_weights->lifetime != OperandLifeTime::NO_VALUE);
- params->use_projection_bias = (projection_bias->lifetime != OperandLifeTime::NO_VALUE);
+ params->use_projection_weight = (projection_weights->lifetime != Operand::LifeTime::NO_VALUE);
+ params->use_projection_bias = (projection_bias->lifetime != Operand::LifeTime::NO_VALUE);
// Make sure the input gate bias is present only when not a CIFG-LSTM.
if (params->use_cifg) {
diff --git a/common/operations/LSTM.h b/common/operations/LSTM.h
index b48c3df..dc6a43c 100644
--- a/common/operations/LSTM.h
+++ b/common/operations/LSTM.h
@@ -24,7 +24,7 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -48,9 +48,9 @@
class LSTMCell {
public:
- LSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ LSTMCell(const Operation& operation, RunTimeOperandInfo* operands);
- bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape,
+ bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape,
Shape* outputStateShape, Shape* cellStateShape, Shape* outputShape);
bool Eval();
diff --git a/common/operations/LocalResponseNormalization.cpp b/common/operations/LocalResponseNormalization.cpp
index 40220e1..26a7a00 100644
--- a/common/operations/LocalResponseNormalization.cpp
+++ b/common/operations/LocalResponseNormalization.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -45,8 +44,6 @@
namespace {
-using namespace hal;
-
inline bool localResponseNormFloat32Impl(const float* inputData, const Shape& inputShape,
int32_t radius, float bias, float alpha, float beta,
int32_t axis, float* outputData,
diff --git a/common/operations/LogSoftmax.cpp b/common/operations/LogSoftmax.cpp
index 4132ef9..fdcccf8 100644
--- a/common/operations/LogSoftmax.cpp
+++ b/common/operations/LogSoftmax.cpp
@@ -16,19 +16,18 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
+#include <algorithm>
+#include <cmath>
+#include <vector>
+
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
-#include <cmath>
-
namespace android {
namespace nn {
namespace log_softmax {
-using namespace hal;
-
constexpr char kOperationName[] = "LOG_SOFTMAX";
constexpr uint32_t kNumInputs = 3;
diff --git a/common/operations/LogicalAndOr.cpp b/common/operations/LogicalAndOr.cpp
index 6ada724..9d7e5ce 100644
--- a/common/operations/LogicalAndOr.cpp
+++ b/common/operations/LogicalAndOr.cpp
@@ -16,7 +16,9 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
+#include <functional>
+#include <vector>
+
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -34,8 +36,6 @@
namespace {
-using namespace hal;
-
bool compute(const std::function<bool(bool, bool)>& func, const bool8* aData, const Shape& aShape,
const bool8* bData, const Shape& bShape, bool8* outputData, const Shape& outputShape) {
IndexedShapeWrapper aShapeIndexed(aShape);
diff --git a/common/operations/LogicalNot.cpp b/common/operations/LogicalNot.cpp
index 8b41813..c715388 100644
--- a/common/operations/LogicalNot.cpp
+++ b/common/operations/LogicalNot.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -32,8 +31,6 @@
namespace {
-using namespace hal;
-
bool compute(const bool8* input, const Shape& shape, bool8* output) {
const auto size = getNumberOfElements(shape);
for (uint32_t i = 0; i < size; ++i) {
diff --git a/common/operations/MaximumMinimum.cpp b/common/operations/MaximumMinimum.cpp
index 91a4bb0..339172f 100644
--- a/common/operations/MaximumMinimum.cpp
+++ b/common/operations/MaximumMinimum.cpp
@@ -20,7 +20,6 @@
#include <vector>
#include "MaximumMinimum.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -31,8 +30,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* aData, const Shape& aShape, const T* bData, const Shape& bShape,
bool isMinimum, T* outputData, const Shape& outputShape) {
@@ -124,7 +121,7 @@
reinterpret_cast<int8_t*>(output), outputShape);
}
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(shape1.type);
+ LOG(ERROR) << "Unsupported data type: " << shape1.type;
return false;
}
}
diff --git a/common/operations/Multinomial.cpp b/common/operations/Multinomial.cpp
index 7e1d2c6..80fb7e8 100644
--- a/common/operations/Multinomial.cpp
+++ b/common/operations/Multinomial.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
#include "guarded_philox_random.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/Multinomial.h b/common/operations/Multinomial.h
index 0f5434e..bdfe587 100644
--- a/common/operations/Multinomial.h
+++ b/common/operations/Multinomial.h
@@ -23,7 +23,7 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -33,9 +33,9 @@
class Multinomial {
public:
- Multinomial(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ Multinomial(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape);
bool Eval();
diff --git a/common/operations/MultinomialTest.cpp b/common/operations/MultinomialTest.cpp
index e34de63..668ed36 100644
--- a/common/operations/MultinomialTest.cpp
+++ b/common/operations/MultinomialTest.cpp
@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#include "Multinomial.h"
+#include <gmock/gmock-matchers.h>
+#include <gtest/gtest.h>
-#include "HalInterfaces.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+#include <vector>
+
+#include "Multinomial.h"
#include "NeuralNetworksWrapper.h"
#include "philox_random.h"
#include "simple_philox.h"
-#include <gmock/gmock-matchers.h>
-#include <gtest/gtest.h>
-#include <unsupported/Eigen/CXX11/Tensor>
-
namespace android {
namespace nn {
namespace wrapper {
diff --git a/common/operations/Neg.cpp b/common/operations/Neg.cpp
index 48d962c..bf21727 100644
--- a/common/operations/Neg.cpp
+++ b/common/operations/Neg.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool compute(const T* input, const Shape& shape, T* output) {
const auto size = getNumberOfElements(shape);
diff --git a/common/operations/PRelu.cpp b/common/operations/PRelu.cpp
index a799a84..7e0c8c3 100644
--- a/common/operations/PRelu.cpp
+++ b/common/operations/PRelu.cpp
@@ -19,7 +19,6 @@
#include <algorithm>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -31,8 +30,6 @@
namespace nn {
namespace prelu {
-using namespace hal;
-
constexpr char kOperationName[] = "PRELU";
constexpr uint32_t kNumInputs = 2;
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp
index 3ffa70f..62594c7 100644
--- a/common/operations/Pooling.cpp
+++ b/common/operations/Pooling.cpp
@@ -22,15 +22,12 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace pooling {
constexpr uint32_t kInputTensor = 0;
@@ -334,8 +331,7 @@
OperandType::INT32,
};
} else {
- NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << opType;
}
if (inputCount >= 10) {
diff --git a/common/operations/Pow.cpp b/common/operations/Pow.cpp
index 40c4adf..03892a2 100644
--- a/common/operations/Pow.cpp
+++ b/common/operations/Pow.cpp
@@ -17,11 +17,11 @@
#define LOG_TAG "Operations"
#include "Pow.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include <cmath>
+#include <vector>
namespace android {
namespace nn {
@@ -29,8 +29,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* baseData, const Shape& baseShape, const T* exponentData,
const Shape& exponentShape, T* outputData, const Shape& outputShape) {
@@ -81,7 +79,7 @@
reinterpret_cast<float*>(outputData), outputShape);
} break;
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(baseShape.type);
+ LOG(ERROR) << "Unsupported data type: " << baseShape.type;
return false;
}
}
diff --git a/common/operations/QLSTM.cpp b/common/operations/QLSTM.cpp
index 3b2dd05..68a9489 100644
--- a/common/operations/QLSTM.cpp
+++ b/common/operations/QLSTM.cpp
@@ -101,8 +101,6 @@
} // namespace
-using hal::OperandType;
-
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
diff --git a/common/operations/Quantize.cpp b/common/operations/Quantize.cpp
index fa04bdd..943a33d 100644
--- a/common/operations/Quantize.cpp
+++ b/common/operations/Quantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {
NNTRACE_COMP("quantizeToQuant8");
@@ -75,10 +72,10 @@
NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
inputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported input operand type for QUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for QUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM ||
outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported output operand type for QUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for QUANTIZE op: " << outputType;
if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
return validateHalVersion(context, HalVersion::V1_3);
} else {
@@ -121,8 +118,7 @@
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. (input type: "
- << toString(inputType)
- << " output type: " << toString(context->getOutputType(kOutputTensor))
+ << inputType << " output type: " << context->getOutputType(kOutputTensor)
<< ")";
}
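For context on what quantizeToQuant8 above computes: TENSOR_QUANT8_ASYMM maps a real value through an affine transform defined by the operand's scale and zeroPoint. An illustrative sketch of that mapping (the spec-defined semantics, not the runtime's vectorized kernel):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t quantizeToQuant8(float value, float scale, int32_t zeroPoint) {
        // q = clamp(round(value / scale) + zeroPoint, 0, 255)
        const int32_t q =
                static_cast<int32_t>(std::round(value / scale)) + zeroPoint;
        return static_cast<uint8_t>(std::clamp(q, 0, 255));
    }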
diff --git a/common/operations/QuantizedLSTM.cpp b/common/operations/QuantizedLSTM.cpp
index e059026..f07bc0a 100644
--- a/common/operations/QuantizedLSTM.cpp
+++ b/common/operations/QuantizedLSTM.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
@@ -34,8 +33,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/QuantizedLSTM.h b/common/operations/QuantizedLSTM.h
index 76e74c6..61963c0 100644
--- a/common/operations/QuantizedLSTM.h
+++ b/common/operations/QuantizedLSTM.h
@@ -28,9 +28,9 @@
class QuantizedLSTMCell {
public:
- QuantizedLSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ QuantizedLSTMCell(const Operation& operation, RunTimeOperandInfo* operands);
- static bool prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* cellStateShape, Shape* outputShape);
bool eval();
diff --git a/common/operations/RNN.cpp b/common/operations/RNN.cpp
index 259c091..f584f0e 100644
--- a/common/operations/RNN.cpp
+++ b/common/operations/RNN.cpp
@@ -22,15 +22,12 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
RNN::RNN(const Operation& operation, RunTimeOperandInfo* operands) {
NNTRACE_TRANS("RNN::RNN");
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/common/operations/RNN.h b/common/operations/RNN.h
index 245eb1d..0a5765b 100644
--- a/common/operations/RNN.h
+++ b/common/operations/RNN.h
@@ -20,7 +20,7 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -30,9 +30,9 @@
class RNN {
public:
- RNN(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ RNN(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* hiddenStateShape, Shape* outputShape);
bool Eval();
diff --git a/common/operations/Rank.cpp b/common/operations/Rank.cpp
index 5f74437..8a6931b 100644
--- a/common/operations/Rank.cpp
+++ b/common/operations/Rank.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Utils.h"
@@ -34,19 +33,19 @@
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
- hal::OperandType inputType = context->getInputType(kInputTensor);
- NN_RET_CHECK(inputType == hal::OperandType::TENSOR_FLOAT16 ||
- inputType == hal::OperandType::TENSOR_FLOAT32 ||
- inputType == hal::OperandType::TENSOR_INT32 ||
- inputType == hal::OperandType::TENSOR_QUANT8_ASYMM ||
- inputType == hal::OperandType::TENSOR_QUANT16_SYMM ||
- inputType == hal::OperandType::TENSOR_BOOL8 ||
- inputType == hal::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
- inputType == hal::OperandType::TENSOR_QUANT16_ASYMM ||
- inputType == hal::OperandType::TENSOR_QUANT8_SYMM ||
- inputType == hal::OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Incorrect input type for a RANK op: " << toString(inputType);
- NN_RET_CHECK(validateOutputTypes(context, {hal::OperandType::INT32}));
+ OperandType inputType = context->getInputType(kInputTensor);
+ NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
+ inputType == OperandType::TENSOR_FLOAT32 ||
+ inputType == OperandType::TENSOR_INT32 ||
+ inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+ inputType == OperandType::TENSOR_QUANT16_SYMM ||
+ inputType == OperandType::TENSOR_BOOL8 ||
+ inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+ inputType == OperandType::TENSOR_QUANT16_ASYMM ||
+ inputType == OperandType::TENSOR_QUANT8_SYMM ||
+ inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
+ << "Incorrect input type for a RANK op: " << inputType;
+ NN_RET_CHECK(validateOutputTypes(context, {OperandType::INT32}));
return validateHalVersion(context, HalVersion::V1_3);
}
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp
index 220a4dc..c56771c 100644
--- a/common/operations/Reduce.cpp
+++ b/common/operations/Reduce.cpp
@@ -22,7 +22,6 @@
#include <limits>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
template <typename T>
inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) {
const Shape inputShape = context->getInputShape(kInputTensor);
diff --git a/common/operations/ResizeImageOps.cpp b/common/operations/ResizeImageOps.cpp
index c33abaf..9042099 100644
--- a/common/operations/ResizeImageOps.cpp
+++ b/common/operations/ResizeImageOps.cpp
@@ -23,15 +23,12 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace resize_image {
constexpr uint32_t kNumInputs = 4;
@@ -178,7 +175,7 @@
} else if (opType == OperationType::RESIZE_NEAREST_NEIGHBOR) {
NN_RET_CHECK(numInputs >= kNumInputs && numInputs <= kNumInputs + kNumOptionalInputs);
} else {
- NN_RET_CHECK_FAIL() << "Unsupported operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported operation " << opType;
}
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
auto inputType = context->getInputType(kInputTensor);
@@ -188,7 +185,7 @@
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported tensor type for operation " << getOperationName(opType);
+ << "Unsupported tensor type for operation " << opType;
if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {
NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
}
@@ -258,7 +255,7 @@
static_cast<float>(inWidth) *
static_cast<float>(context->getInputValue<_Float16>(kOutputWidthParamScalar)));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << opType;
}
NN_RET_CHECK_GT(height, 0);
NN_RET_CHECK_GT(width, 0);
@@ -304,8 +301,7 @@
context->getOutputShape(kOutputTensor));
default:
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation "
- << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
}
diff --git a/common/operations/RoiAlign.cpp b/common/operations/RoiAlign.cpp
index b9daf45..01008cc 100644
--- a/common/operations/RoiAlign.cpp
+++ b/common/operations/RoiAlign.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -51,8 +50,6 @@
namespace {
-using namespace hal;
-
template <typename T_Input, typename T_Roi>
inline bool roiAlignNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/common/operations/RoiPooling.cpp b/common/operations/RoiPooling.cpp
index a4f8214..373669a 100644
--- a/common/operations/RoiPooling.cpp
+++ b/common/operations/RoiPooling.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -48,8 +47,6 @@
namespace {
-using namespace hal;
-
template <typename T_Input, typename T_Roi>
inline bool roiPoolingNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/common/operations/SVDF.cpp b/common/operations/SVDF.cpp
index 8314838..953e2a8 100644
--- a/common/operations/SVDF.cpp
+++ b/common/operations/SVDF.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include <algorithm>
#include <vector>
@@ -29,8 +28,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
SVDF::SVDF(const Operation& operation, RunTimeOperandInfo* operands) {
NNTRACE_TRANS("SVDF::SVDF");
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/common/operations/SVDF.h b/common/operations/SVDF.h
index ca9b54e..da18568 100644
--- a/common/operations/SVDF.h
+++ b/common/operations/SVDF.h
@@ -23,7 +23,7 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -38,10 +38,10 @@
class SVDF {
public:
- SVDF(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ SVDF(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
- Shape* stateShape, Shape* outputShape);
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* stateShape,
+ Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/common/operations/Select.cpp b/common/operations/Select.cpp
index 2026595..9105389 100644
--- a/common/operations/Select.cpp
+++ b/common/operations/Select.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -35,8 +34,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool compute(const bool8* conditionData, const Shape& conditionShape, const T* aData,
const Shape& aShape, const T* bData, const Shape& bShape, T* outputData,
@@ -78,7 +75,7 @@
inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for select op: " << toString(inputType);
+ << "Unsupported input operand type for select op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_BOOL8, inputType, inputType}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
return validateHalVersion(context, HalVersion::V1_2);
diff --git a/common/operations/Slice.cpp b/common/operations/Slice.cpp
index 3c4f2fa..1b5a493 100644
--- a/common/operations/Slice.cpp
+++ b/common/operations/Slice.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -37,8 +36,6 @@
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-using namespace hal;
-
namespace {
template <typename T>
diff --git a/common/operations/Softmax.cpp b/common/operations/Softmax.cpp
index a986390..bb85c0b 100644
--- a/common/operations/Softmax.cpp
+++ b/common/operations/Softmax.cpp
@@ -25,7 +25,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta,
int32_t axis, float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("softmaxFloatSlow32");
diff --git a/common/operations/Squeeze.cpp b/common/operations/Squeeze.cpp
index 276461d..d734550 100644
--- a/common/operations/Squeeze.cpp
+++ b/common/operations/Squeeze.cpp
@@ -20,7 +20,6 @@
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"
@@ -36,8 +35,6 @@
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-using namespace hal;
-
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
@@ -46,7 +43,7 @@
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for SQUEEZE op: " << toString(inputType);
+ << "Unsupported input operand type for SQUEEZE op: " << inputType;
HalVersion minSupportedHalVersion;
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/StridedSlice.cpp b/common/operations/StridedSlice.cpp
index 5ff5aec..3bb3a82 100644
--- a/common/operations/StridedSlice.cpp
+++ b/common/operations/StridedSlice.cpp
@@ -23,7 +23,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool compute(const T* inputData, const Shape& inputShape, const int32_t* beginData,
const int32_t* endData, const int32_t* stridesData, int32_t beginMask, int32_t endMask,
@@ -107,7 +104,7 @@
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for STRIDED_SLICE op: " << toString(inputType);
+ << "Unsupported input operand type for STRIDED_SLICE op: " << inputType;
HalVersion minSupportedHalVersion;
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/Tile.cpp b/common/operations/Tile.cpp
index 517d75e..af17df1 100644
--- a/common/operations/Tile.cpp
+++ b/common/operations/Tile.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include "Tile.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
@@ -29,8 +28,6 @@
namespace {
-using namespace hal;
-
template <typename T>
void CopyMultipleTimes(const T* in_data, int32_t in_size, int32_t multiplier, T* out_data) {
for (int i = 0; i < multiplier; ++i) {
diff --git a/common/operations/TopK_V2.cpp b/common/operations/TopK_V2.cpp
index e005b9a..9e4ceed 100644
--- a/common/operations/TopK_V2.cpp
+++ b/common/operations/TopK_V2.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -38,8 +37,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t k, T* valuesData,
int32_t* indicesData) {
@@ -85,7 +82,7 @@
inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for select op: " << toString(inputType);
+ << "Unsupported input operand type for TOPK_V2 op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32}));
NN_RET_CHECK(validateOutputTypes(context, {inputType, OperandType::TENSOR_INT32}));
HalVersion minSupportedHalVersion = HalVersion::V1_2;
@@ -132,7 +129,7 @@
return executeTyped<int8_t>(context);
} break;
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(inputShape.type);
+ LOG(ERROR) << "Unsupported data type: " << inputShape.type;
return false;
}
}
diff --git a/common/operations/Transpose.cpp b/common/operations/Transpose.cpp
index ff70f9e..423b3de 100644
--- a/common/operations/Transpose.cpp
+++ b/common/operations/Transpose.cpp
@@ -19,7 +19,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -42,8 +41,6 @@
namespace {
-using namespace hal;
-
template <typename T>
bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
const Shape& permShape, T* outputData, const Shape& outputShape) {
diff --git a/common/operations/TransposeConv2D.cpp b/common/operations/TransposeConv2D.cpp
index d67a473..0ee5d04 100644
--- a/common/operations/TransposeConv2D.cpp
+++ b/common/operations/TransposeConv2D.cpp
@@ -25,7 +25,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@
namespace {
-using namespace hal;
-
// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
@@ -452,7 +449,9 @@
filterType == inputType)
<< "Unsupported filter tensor type for operation " << kOperationName;
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
0)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -570,7 +569,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param,
context->getOutputBuffer<uint8_t>(kOutputTensor),
@@ -595,7 +596,9 @@
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param,
context->getOutputBuffer<int8_t>(kOutputTensor),
diff --git a/common/operations/UnidirectionalSequenceLSTM.cpp b/common/operations/UnidirectionalSequenceLSTM.cpp
index 03854f6..9a00e1f 100644
--- a/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -18,7 +18,6 @@
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "LSTM.h"
#include "OperationResolver.h"
@@ -88,8 +87,6 @@
namespace {
-using namespace hal;
-
inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) {
return context->getInputBuffer(tensor) != nullptr;
}
@@ -157,7 +154,7 @@
} else {
NN_RET_CHECK_FAIL()
<< "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_LSTM op: "
- << toString(inputType);
+ << inputType;
}
HalVersion minHalVersionSupported = HalVersion::V1_2;
if (context->getNumOutputs() == kNumOutputsWithState) {
diff --git a/common/operations/UnidirectionalSequenceRNN.cpp b/common/operations/UnidirectionalSequenceRNN.cpp
index 273b701..aa79739 100644
--- a/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/common/operations/UnidirectionalSequenceRNN.cpp
@@ -20,9 +20,9 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
@@ -44,8 +44,6 @@
namespace {
-using namespace hal;
-
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
@@ -135,7 +133,7 @@
OperandType inputType = context->getInputType(kInputTensor);
if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {
LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: "
- << toString(inputType);
+ << inputType;
return false;
}
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, inputType, inputType, inputType,