Port sample driver to AIDL
The implementation is mostly copied from HIDL Sample Driver but also
makes use of the canonical types and their utils.
XNNPACK driver isn't updated in this CL.
Bug: 172922059
Test: VtsNeuralnetworksTargetTest on blueline
Change-Id: I0745cce49790c51b7dd7e4e2b2875e7061dfb092
Merged-In: I0745cce49790c51b7dd7e4e2b2875e7061dfb092
(cherry picked from commit ca12f8c2b4be9fc0e2b3de7bc99a8daadf88aca7)
diff --git a/common/AidlBufferTracker.cpp b/common/AidlBufferTracker.cpp
new file mode 100644
index 0000000..fae1620
--- /dev/null
+++ b/common/AidlBufferTracker.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AidlBufferTracker.h"
+
+#include <android-base/macros.h>
+
+#include <memory>
+#include <mutex>
+#include <set>
+#include <stack>
+#include <utility>
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+#include "CpuExecutor.h"
+#include "nnapi/TypeUtils.h"
+
+namespace android::nn {
+
+std::shared_ptr<AidlManagedBuffer> AidlManagedBuffer::create(
+ uint32_t size, std::set<AidlHalPreparedModelRole> roles, const Operand& operand) {
+ std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]);
+ if (buffer == nullptr) {
+ return nullptr;
+ }
+ if (isExtension(operand.type)) {
+ LOG(ERROR) << "AidlManagedBuffer cannot handle extension operands.";
+ return nullptr;
+ }
+ return std::make_shared<AidlManagedBuffer>(std::move(buffer), size, std::move(roles), operand);
+}
+
+AidlManagedBuffer::AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
+ std::set<AidlHalPreparedModelRole> roles,
+ const Operand& operand)
+ : kBuffer(std::move(buffer)),
+ kSize(size),
+ kRoles(std::move(roles)),
+ kOperandType(operand.type),
+ kInitialDimensions(operand.dimensions),
+ mUpdatedDimensions(operand.dimensions) {
+ CHECK(!isExtension(kOperandType));
+}
+
+ErrorStatus AidlManagedBuffer::validateRequest(
+ uint32_t poolIndex, const Request& request,
+ const aidl_hal::IPreparedModel* preparedModel) const {
+ CHECK_LT(poolIndex, request.pools.size());
+ CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex]));
+ std::lock_guard<std::mutex> guard(mMutex);
+
+ bool usedAsInput = false, usedAsOutput = false;
+ for (uint32_t i = 0; i < request.inputs.size(); i++) {
+ if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
+ if (request.inputs[i].location.poolIndex != poolIndex) continue;
+ // Validate if the input role is specified during allocation.
+ if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
+ LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ if (!mInitialized) {
+ LOG(ERROR)
+ << "AidlManagedBuffer::validateRequest -- using uninitialized buffer as input "
+ "request.";
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions);
+ if (!combined.has_value()) {
+ LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
+ << toString(mUpdatedDimensions) << " vs "
+ << toString(request.inputs[i].dimensions) << ")";
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ usedAsInput = true;
+ }
+ for (uint32_t i = 0; i < request.outputs.size(); i++) {
+ if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
+ if (request.outputs[i].location.poolIndex != poolIndex) continue;
+ if (usedAsInput || usedAsOutput) {
+ LOG(ERROR) << "AidlManagedBuffer::validateRequest -- using the same device memory for "
+ "input/output or multiple outputs";
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ // Validate if the output role is specified during allocation.
+ if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) {
+ LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions);
+ if (!combined.has_value()) {
+ LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
+ << toString(kInitialDimensions) << " vs "
+ << toString(request.outputs[i].dimensions) << ")";
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ usedAsOutput = true;
+ }
+ return ErrorStatus::NONE;
+}
+
+ErrorStatus AidlManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions,
+ uint32_t size) const {
+ if (size != kSize) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize
+ << " vs " << size;
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+
+ if (nonExtensionOperandTypeIsScalar(static_cast<int>(kOperandType))) {
+ if (!dimensions.empty()) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid dimensions for scalar "
+ "operand: "
+ << toString(dimensions);
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ return ErrorStatus::NONE;
+ }
+
+ if (dimensions.empty()) {
+ if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the initial dimensions are not "
+ "fully "
+ "specified and no dimension update is provided: "
+ << toString(kInitialDimensions);
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ } else {
+ if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the updated dimensions are not "
+ "fully "
+ "specified: "
+ << toString(dimensions);
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ }
+
+ const auto combined = combineDimensions(kInitialDimensions, dimensions);
+ if (!combined.has_value()) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- incompatible dimensions ("
+ << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ return ErrorStatus::NONE;
+}
+
+ErrorStatus AidlManagedBuffer::validateCopyTo(uint32_t size) const {
+ if (size != kSize) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- invalid memory size: " << kSize
+ << " vs " << size;
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+ std::lock_guard<std::mutex> guard(mMutex);
+ if (!mInitialized) {
+ LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- using uninitialized buffer as source.";
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return ErrorStatus::NONE;
+}
+
+bool AidlManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) {
+ auto combined = combineDimensions(kInitialDimensions, dimensions);
+ if (!combined.has_value()) {
+ LOG(ERROR) << "AidlManagedBuffer::updateDimensions -- incompatible dimensions ("
+ << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
+ return false;
+ }
+ std::lock_guard<std::mutex> guard(mMutex);
+ mUpdatedDimensions = std::move(combined).value();
+ return true;
+}
+
+void AidlManagedBuffer::setInitialized(bool initialized) {
+ std::lock_guard<std::mutex> guard(mMutex);
+ mInitialized = initialized;
+}
+
+std::unique_ptr<AidlBufferTracker::Token> AidlBufferTracker::add(
+ std::shared_ptr<AidlManagedBuffer> buffer) {
+ if (buffer == nullptr) {
+ return nullptr;
+ }
+ std::lock_guard<std::mutex> guard(mMutex);
+ uint32_t token = 0;
+ if (mFreeTokens.empty()) {
+ token = mTokenToBuffers.size();
+ mTokenToBuffers.push_back(std::move(buffer));
+ } else {
+ token = mFreeTokens.top();
+ mFreeTokens.pop();
+ mTokenToBuffers[token] = std::move(buffer);
+ }
+ VLOG(MEMORY) << "AidlBufferTracker::add -- new token = " << token;
+ return std::make_unique<Token>(token, shared_from_this());
+}
+
+std::shared_ptr<AidlManagedBuffer> AidlBufferTracker::get(uint32_t token) const {
+ std::lock_guard<std::mutex> guard(mMutex);
+ if (mTokenToBuffers.size() <= token || mTokenToBuffers[token] == nullptr) {
+ LOG(ERROR) << "AidlBufferTracker::get -- unknown token " << token;
+ return nullptr;
+ }
+ return mTokenToBuffers[token];
+}
+
+void AidlBufferTracker::free(uint32_t token) {
+ std::lock_guard<std::mutex> guard(mMutex);
+ CHECK_LT(token, mTokenToBuffers.size());
+ CHECK(mTokenToBuffers[token] != nullptr);
+ VLOG(MEMORY) << "AidlBufferTracker::free -- release token = " << token;
+ mTokenToBuffers[token] = nullptr;
+ mFreeTokens.push(token);
+}
+
+} // namespace android::nn
diff --git a/common/AidlHalUtils.cpp b/common/AidlHalUtils.cpp
new file mode 100644
index 0000000..c99803b
--- /dev/null
+++ b/common/AidlHalUtils.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// This file contains pre-canonical-types utility code and includes HAL
+// utilities. LegacyUtils.h is the subset of these utilities that do not touch
+// HAL.
+
+#include "AidlHalUtils.h"
+
+#include <android-base/logging.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+#include "LegacyUtils.h"
+
+namespace android::nn {
+
+std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
+ aidl_hal::PerformanceInfo perf) {
+ static constexpr ndk::enum_range<aidl_hal::OperandType> kOperandTypeRange;
+ std::vector<aidl_hal::OperandPerformance> ret;
+ ret.reserve(std::distance(kOperandTypeRange.begin(), kOperandTypeRange.end()));
+ for (aidl_hal::OperandType type : kOperandTypeRange) {
+ if (type != aidl_hal::OperandType::SUBGRAPH) {
+ ret.push_back(aidl_hal::OperandPerformance{type, perf});
+ }
+ }
+ std::sort(ret.begin(), ret.end(),
+ [](const aidl_hal::OperandPerformance& a, const aidl_hal::OperandPerformance& b) {
+ return a.type < b.type;
+ });
+
+ return ret;
+}
+
+void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
+ aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf) {
+ CHECK(operandPerformance != nullptr);
+ const auto it = std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
+ [](const aidl_hal::OperandPerformance& perf,
+ aidl_hal::OperandType type) { return perf.type < type; });
+ CHECK(it != operandPerformance->end())
+ << toString(type) << " not in operand performance vector";
+ it->info = perf;
+}
+
+bool isExtensionOperandType(aidl_hal::OperandType type) {
+ return isExtensionOperandType(convert(type).value());
+}
+
+aidl_hal::ErrorStatus convertResultCodeToAidlErrorStatus(int resultCode) {
+ const auto errorStatus = aidl_hal::utils::convert(convertResultCodeToErrorStatus(resultCode));
+ CHECK(errorStatus.has_value()) << "Unhandled error (" << errorStatus.error().code
+ << "): " << errorStatus.error().message;
+ return errorStatus.value();
+}
+
+} // namespace android::nn
diff --git a/common/AidlValidateHal.cpp b/common/AidlValidateHal.cpp
new file mode 100644
index 0000000..c129bfc
--- /dev/null
+++ b/common/AidlValidateHal.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ValidateHal"
+
+#include "AidlValidateHal.h"
+
+#include <android-base/logging.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "LegacyUtils.h"
+#include "nnapi/TypeUtils.h"
+
+namespace android {
+namespace nn {
+
+bool validateMemoryDesc(
+ const aidl_hal::BufferDesc& desc,
+ const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
+ const std::vector<aidl_hal::BufferRole>& inputRoles,
+ const std::vector<aidl_hal::BufferRole>& outputRoles,
+ std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
+ getModel,
+ std::set<AidlHalPreparedModelRole>* preparedModelRoles,
+ aidl_hal::Operand* combinedOperand) {
+ NN_RET_CHECK(preparedModels.size() != 0);
+ NN_RET_CHECK(inputRoles.size() != 0 || outputRoles.size() != 0);
+
+ std::set<AidlHalPreparedModelRole> roles;
+ std::vector<aidl_hal::Operand> operands;
+ operands.reserve(inputRoles.size() + outputRoles.size());
+ for (const auto& role : inputRoles) {
+ NN_RET_CHECK_LT(role.modelIndex, preparedModels.size());
+ const auto& preparedModel = preparedModels[role.modelIndex];
+ NN_RET_CHECK(preparedModel != nullptr);
+ const auto* model = getModel(preparedModel);
+ NN_RET_CHECK(model != nullptr);
+ const auto& inputIndexes = model->main.inputIndexes;
+ NN_RET_CHECK_LT(role.ioIndex, inputIndexes.size());
+ NN_RET_CHECK_GT(role.frequency, 0.0f);
+ NN_RET_CHECK_LE(role.frequency, 1.0f);
+ const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);
+ NN_RET_CHECK(success);
+ operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]);
+ }
+ for (const auto& role : outputRoles) {
+ NN_RET_CHECK_LT(role.modelIndex, preparedModels.size());
+ const auto& preparedModel = preparedModels[role.modelIndex];
+ NN_RET_CHECK(preparedModel != nullptr);
+ const auto* model = getModel(preparedModel);
+ NN_RET_CHECK(model != nullptr);
+ const auto& outputIndexes = model->main.outputIndexes;
+ NN_RET_CHECK_LT(role.ioIndex, outputIndexes.size());
+ NN_RET_CHECK_GT(role.frequency, 0.0f);
+ NN_RET_CHECK_LE(role.frequency, 1.0f);
+ const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);
+ NN_RET_CHECK(success);
+ operands.push_back(model->main.operands[outputIndexes[role.ioIndex]]);
+ }
+
+ CHECK(!operands.empty());
+ const auto opType = operands[0].type;
+ const auto canonicalOperandType = convert(opType);
+ NN_RET_CHECK(canonicalOperandType.has_value()) << canonicalOperandType.error().message;
+ const bool isExtensionOperand = isExtension(canonicalOperandType.value());
+
+ auto maybeDimensions = toUnsigned(desc.dimensions);
+ NN_RET_CHECK(maybeDimensions.has_value()) << maybeDimensions.error().message;
+ std::vector<uint32_t> dimensions = std::move(maybeDimensions).value();
+
+ for (const auto& operand : operands) {
+ NN_RET_CHECK(operand.type == operands[0].type)
+ << toString(operand.type) << " vs " << toString(operands[0].type);
+ NN_RET_CHECK_EQ(operand.scale, operands[0].scale);
+ NN_RET_CHECK_EQ(operand.zeroPoint, operands[0].zeroPoint);
+ // NOTE: validateMemoryDesc cannot validate extra parameters for extension operand type.
+ if (!isExtensionOperand) {
+ const auto& lhsExtraParams = operand.extraParams;
+ const auto& rhsExtraParams = operands[0].extraParams;
+ NN_RET_CHECK(lhsExtraParams == rhsExtraParams)
+ << (lhsExtraParams.has_value() ? lhsExtraParams.value().toString()
+ : "std::nullopt")
+ << " vs "
+ << (rhsExtraParams.has_value() ? rhsExtraParams.value().toString()
+ : "std::nullopt");
+ }
+ const auto maybeRhsDimensions = toUnsigned(operand.dimensions);
+ NN_RET_CHECK(maybeRhsDimensions.has_value()) << maybeRhsDimensions.error().message;
+ const auto combined = combineDimensions(dimensions, maybeRhsDimensions.value());
+ NN_RET_CHECK(combined.has_value());
+ dimensions = combined.value();
+ }
+
+ // NOTE: validateMemoryDesc cannot validate scalar dimensions with extension operand type.
+ if (!isExtensionOperand) {
+ NN_RET_CHECK(!nonExtensionOperandTypeIsScalar(static_cast<int>(opType)) ||
+ dimensions.empty())
+ << "invalid dimensions with scalar operand type.";
+ }
+
+ if (preparedModelRoles != nullptr) {
+ *preparedModelRoles = std::move(roles);
+ }
+ if (combinedOperand != nullptr) {
+ *combinedOperand = operands[0];
+ // No need to check that values fit int32_t here, since the original values are obtained
+ // from int32_t.
+ combinedOperand->dimensions = aidl_hal::utils::toSigned(dimensions).value();
+ }
+ return true;
+}
+
+} // namespace nn
+} // namespace android
diff --git a/common/Android.bp b/common/Android.bp
index 2496728..8283809 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -84,6 +84,7 @@
],
export_include_dirs: ["include"],
srcs: [
+ "AidlHalUtils.cpp",
"ExecutionBurstController.cpp",
"ExecutionBurstServer.cpp",
"LegacyHalUtils.cpp",
@@ -97,6 +98,7 @@
"tensorflow_headers",
],
shared_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
"[email protected]",
"[email protected]",
"[email protected]",
@@ -115,6 +117,7 @@
"neuralnetworks_utils_hal_1_1",
"neuralnetworks_utils_hal_1_2",
"neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_aidl",
"neuralnetworks_utils_hal_common",
],
cflags: [
@@ -151,6 +154,9 @@
"include",
],
srcs: [
+ "AidlBufferTracker.cpp",
+ "AidlHalUtils.cpp",
+ "AidlValidateHal.cpp",
"BufferTracker.cpp",
"CpuExecutor.cpp",
"ExecutionBurstController.cpp",
@@ -187,6 +193,7 @@
"operations/Tile.cpp",
],
shared_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
"[email protected]",
"[email protected]",
"[email protected]",
@@ -218,6 +225,7 @@
"neuralnetworks_utils_hal_1_1",
"neuralnetworks_utils_hal_1_2",
"neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_aidl",
"neuralnetworks_utils_hal_common",
"neuralnetworks_utils_hal_service",
"philox_random",
@@ -315,6 +323,7 @@
"libutils",
],
static_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
"[email protected]",
"[email protected]",
"[email protected]",
diff --git a/common/LegacyHalUtils.cpp b/common/LegacyHalUtils.cpp
index 5f12fbd..3072726 100644
--- a/common/LegacyHalUtils.cpp
+++ b/common/LegacyHalUtils.cpp
@@ -23,11 +23,14 @@
#include <nnapi/hal/1.1/Conversions.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/hal/aidl/Conversions.h>
#include <algorithm>
+#include <limits>
#include <set>
#include <string>
#include <tuple>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -728,6 +731,27 @@
return true;
}
+bool compliantWithAidl(const V1_3::Operand& operand) {
+ if (static_cast<std::underlying_type_t<V1_3::OperandType>>(operand.type) >
+ std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
+ if (operand.location.poolIndex > std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
+ if (operand.extraParams.getDiscriminator() ==
+ V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant &&
+ operand.extraParams.channelQuant().channelDim > std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
+ for (auto dim : operand.dimensions) {
+ if (dim > std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
+ }
+ return true;
+}
+
static bool compliantWith(HalVersion version, const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations) {
// A boolean vector indicating whether each pool is compliant with the target HAL version.
@@ -760,6 +784,9 @@
case HalVersion::V1_3:
is_operand_compliant = compliantWithV1_3(op);
break;
+ case HalVersion::AIDL_UNSTABLE:
+ is_operand_compliant = compliantWithAidl(op);
+ break;
}
return is_operand_compliant &&
!(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE &&
diff --git a/common/LegacyUtils.cpp b/common/LegacyUtils.cpp
index 5b9e0f3..6810033 100644
--- a/common/LegacyUtils.cpp
+++ b/common/LegacyUtils.cpp
@@ -164,6 +164,8 @@
return Version::ANDROID_Q;
case HalVersion::V1_3:
return Version::ANDROID_R;
+ case HalVersion::AIDL_UNSTABLE:
+ return Version::ANDROID_S;
}
LOG(FATAL) << "Cannot convert " << halVersion;
return {};
diff --git a/common/TypeUtils.cpp b/common/TypeUtils.cpp
index 56a2db7..9d9ba13 100644
--- a/common/TypeUtils.cpp
+++ b/common/TypeUtils.cpp
@@ -826,6 +826,8 @@
return os << "ANDROID_Q";
case Version::ANDROID_R:
return os << "ANDROID_R";
+ case Version::ANDROID_S:
+ return os << "ANDROID_S";
case Version::CURRENT_RUNTIME:
return os << "CURRENT_RUNTIME";
}
@@ -844,6 +846,8 @@
return os << "HAL version 1.2";
case HalVersion::V1_3:
return os << "HAL version 1.3";
+ case HalVersion::AIDL_UNSTABLE:
+ return os << "HAL uses unstable AIDL";
}
return os << "HalVersion{" << underlyingType(halVersion) << "}";
}
diff --git a/common/Validation.cpp b/common/Validation.cpp
index 089bf51..0862db3 100644
--- a/common/Validation.cpp
+++ b/common/Validation.cpp
@@ -1108,7 +1108,7 @@
Result<Version> validateRequestArgumentsForModel(
const std::vector<Request::Argument>& requestArguments,
const std::vector<uint32_t>& operandIndexes, const std::vector<Operand>& operands,
- bool isOutput) {
+ bool isOutput, bool allowUnspecifiedOutput) {
auto version = Version::ANDROID_OC_MR1;
// The request should specify as many arguments as were described in the model.
const std::string_view type = isOutput ? "output" : "input";
@@ -1136,6 +1136,8 @@
NN_VALIDATE(isOutput)
<< "Model has unknown input rank but the request does not "
"specify the rank.";
+ NN_VALIDATE(allowUnspecifiedOutput)
+ << "Model has unknown output rank and request does not specify it.";
// Unspecified output dimensions introduced in Android Q.
version = combineVersions(version, Version::ANDROID_Q);
}
@@ -1143,7 +1145,7 @@
// Validate that all the dimensions are specified in the model.
for (size_t i = 0; i < modelRank; i++) {
if (operand.dimensions[i] == 0) {
- NN_VALIDATE(isOutput)
+ NN_VALIDATE(isOutput && allowUnspecifiedOutput)
<< "Model has dimension " << i
<< " set to 0 but the request does not specify the dimension.";
// Unspecified output dimensions introduced in Android Q.
@@ -1162,8 +1164,9 @@
<< " has dimension " << i << " of " << requestArgument.dimensions[i]
<< " different than the model's " << operand.dimensions[i];
if (requestArgument.dimensions[i] == 0) {
- NN_VALIDATE(isOutput) << "Request " << type << " " << requestArgumentIndex
- << " has dimension " << i << " of zero";
+ NN_VALIDATE(isOutput && allowUnspecifiedOutput)
+ << "Request " << type << " " << requestArgumentIndex
+ << " has dimension " << i << " of zero";
// Unspecified output dimensions introduced in Android Q.
version = combineVersions(version, Version::ANDROID_Q);
}
@@ -1174,15 +1177,18 @@
return version;
}
-Result<Version> validateRequestForModelImpl(const Request& request, const Model& model) {
+Result<Version> validateRequestForModelImpl(const Request& request, const Model& model,
+ bool allowUnspecifiedOutput) {
auto version = NN_TRY(validateRequest(request));
version = combineVersions(version, NN_TRY(validateModel(model)));
- version = combineVersions(version, NN_TRY(validateRequestArgumentsForModel(
- request.inputs, model.main.inputIndexes,
- model.main.operands, /*isOutput=*/false)));
- version = combineVersions(version, NN_TRY(validateRequestArgumentsForModel(
- request.outputs, model.main.outputIndexes,
- model.main.operands, /*isOutput=*/true)));
+ version = combineVersions(version,
+ NN_TRY(validateRequestArgumentsForModel(
+ request.inputs, model.main.inputIndexes, model.main.operands,
+ /*isOutput=*/false, /*allowUnspecifiedOutput=*/true)));
+ version = combineVersions(
+ version, NN_TRY(validateRequestArgumentsForModel(
+ request.outputs, model.main.outputIndexes, model.main.operands,
+ /*isOutput=*/true, allowUnspecifiedOutput)));
return version;
}
@@ -2720,8 +2726,9 @@
return validateVector(bufferRoles, validateBufferRole);
}
-Result<Version> validateRequestForModel(const Request& request, const Model& model) {
- return validateRequestForModelImpl(request, model);
+Result<Version> validateRequestForModel(const Request& request, const Model& model,
+ bool allowUnspecifiedOutput) {
+ return validateRequestForModelImpl(request, model, allowUnspecifiedOutput);
}
Result<Version> validateMemoryDesc(
diff --git a/common/include/AidlBufferTracker.h b/common/include/AidlBufferTracker.h
new file mode 100644
index 0000000..e7afa5e
--- /dev/null
+++ b/common/include/AidlBufferTracker.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H
+
+#include <android-base/macros.h>
+#include <android-base/thread_annotations.h>
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <stack>
+#include <utility>
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+#include "AidlValidateHal.h"
+#include "CpuExecutor.h"
+
+namespace android::nn {
+
+// This class manages a CPU buffer allocated on heap and provides validation methods.
+class AidlManagedBuffer {
+ public:
+ static std::shared_ptr<AidlManagedBuffer> create(uint32_t size,
+ std::set<AidlHalPreparedModelRole> roles,
+ const Operand& operand);
+
+ // Prefer AidlManagedBuffer::create.
+ AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
+ std::set<AidlHalPreparedModelRole> roles, const Operand& operand);
+
+ RunTimePoolInfo createRunTimePoolInfo() const {
+ return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize);
+ }
+
+ // "poolIndex" is the index of this buffer in the request.pools.
+ ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
+ const aidl_hal::IPreparedModel* preparedModel) const;
+
+ // "size" is the byte size of the Memory provided to the copyFrom or copyTo method.
+ ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
+ ErrorStatus validateCopyTo(uint32_t size) const;
+
+ bool updateDimensions(const std::vector<uint32_t>& dimensions);
+ void setInitialized(bool initialized);
+
+ private:
+ mutable std::mutex mMutex;
+ const std::unique_ptr<uint8_t[]> kBuffer;
+ const uint32_t kSize;
+ const std::set<AidlHalPreparedModelRole> kRoles;
+ const OperandType kOperandType;
+ const std::vector<uint32_t> kInitialDimensions;
+ std::vector<uint32_t> mUpdatedDimensions GUARDED_BY(mMutex);
+ bool mInitialized GUARDED_BY(mMutex) = false;
+};
+
+// Keep track of all AidlManagedBuffers and assign each with a unique token.
+class AidlBufferTracker : public std::enable_shared_from_this<AidlBufferTracker> {
+ DISALLOW_COPY_AND_ASSIGN(AidlBufferTracker);
+
+ public:
+ // A RAII class to help manage the lifetime of the token.
+ // It is only supposed to be constructed in AidlBufferTracker::add.
+ class Token {
+ DISALLOW_COPY_AND_ASSIGN(Token);
+
+ public:
+ Token(uint32_t token, std::shared_ptr<AidlBufferTracker> tracker)
+ : kToken(token), kBufferTracker(std::move(tracker)) {}
+ ~Token() { kBufferTracker->free(kToken); }
+ uint32_t get() const { return kToken; }
+
+ private:
+ const uint32_t kToken;
+ const std::shared_ptr<AidlBufferTracker> kBufferTracker;
+ };
+
+ // The factory of AidlBufferTracker. This ensures that the AidlBufferTracker is always managed
+ // by a shared_ptr.
+ static std::shared_ptr<AidlBufferTracker> create() {
+ return std::make_shared<AidlBufferTracker>();
+ }
+
+ // Prefer AidlBufferTracker::create.
+ AidlBufferTracker() : mTokenToBuffers(1) {}
+
+ std::unique_ptr<Token> add(std::shared_ptr<AidlManagedBuffer> buffer);
+ std::shared_ptr<AidlManagedBuffer> get(uint32_t token) const;
+
+ private:
+ void free(uint32_t token);
+
+ mutable std::mutex mMutex;
+ std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens GUARDED_BY(mMutex);
+
+ // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping.
+ // The index of the vector is the token. When the token gets freed, the corresponding entry is
+ // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token.
+ std::vector<std::shared_ptr<AidlManagedBuffer>> mTokenToBuffers GUARDED_BY(mMutex);
+};
+
+} // namespace android::nn
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H
diff --git a/common/include/AidlHalInterfaces.h b/common/include/AidlHalInterfaces.h
new file mode 100644
index 0000000..743fc4b
--- /dev/null
+++ b/common/include/AidlHalInterfaces.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
+#include <aidl/android/hardware/neuralnetworks/DataLocation.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h>
+#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h>
+#include <aidl/android/hardware/neuralnetworks/FusedActivationFunc.h>
+#include <aidl/android/hardware/neuralnetworks/IBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/IDevice.h>
+#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/Operand.h>
+#include <aidl/android/hardware/neuralnetworks/OperandExtraParams.h>
+#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h>
+#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h>
+#include <aidl/android/hardware/neuralnetworks/OperandType.h>
+#include <aidl/android/hardware/neuralnetworks/Operation.h>
+#include <aidl/android/hardware/neuralnetworks/OperationType.h>
+#include <aidl/android/hardware/neuralnetworks/OutputShape.h>
+#include <aidl/android/hardware/neuralnetworks/PerformanceInfo.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <aidl/android/hardware/neuralnetworks/RequestArgument.h>
+#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
+#include <aidl/android/hardware/neuralnetworks/Subgraph.h>
+#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
+#include <aidl/android/hardware/neuralnetworks/Timing.h>
+
+namespace android::nn {
+
+namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;
+
+inline constexpr aidl_hal::Priority kDefaultPriorityAidl = aidl_hal::Priority::MEDIUM;
+
+} // namespace android::nn
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H
diff --git a/common/include/AidlHalUtils.h b/common/include/AidlHalUtils.h
new file mode 100644
index 0000000..1053603
--- /dev/null
+++ b/common/include/AidlHalUtils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H
+
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+
+namespace android {
+namespace nn {
+
+// Return a vector with one entry for each non-extension OperandType except
+// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be
+// sorted by OperandType.
+//
+// Control flow (OperandType::SUBGRAPH) operation performance is specified
+// separately using Capabilities::ifPerformance and
+// Capabilities::whilePerformance.
+std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
+ aidl_hal::PerformanceInfo perf);
+
+// Update the vector entry corresponding to the specified OperandType with the
+// specified PerformanceInfo value. The vector must already have an entry for
+// that OperandType, and must be sorted by OperandType.
+void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
+ aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf);
+
+// Returns true if an operand type is an extension type.
+bool isExtensionOperandType(aidl_hal::OperandType type);
+
+aidl_hal::ErrorStatus convertResultCodeToAidlErrorStatus(int resultCode);
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H
diff --git a/common/include/AidlValidateHal.h b/common/include/AidlValidateHal.h
new file mode 100644
index 0000000..0354631
--- /dev/null
+++ b/common/include/AidlValidateHal.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H
+
+#include <memory>
+#include <set>
+#include <tuple>
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Validation.h"
+
+namespace android {
+namespace nn {
+
+using AidlHalPreparedModelRole = std::tuple<const aidl_hal::IPreparedModel*, IOType, uint32_t>;
+
+bool validateMemoryDesc(
+ const aidl_hal::BufferDesc& desc,
+ const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
+ const std::vector<aidl_hal::BufferRole>& inputRoles,
+ const std::vector<aidl_hal::BufferRole>& outputRoles,
+ std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
+ getModel,
+ std::set<AidlHalPreparedModelRole>* preparedModelRoles, aidl_hal::Operand* combinedOperand);
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H
diff --git a/common/include/ControlFlow.h b/common/include/ControlFlow.h
index 9149903..ac446c2 100644
--- a/common/include/ControlFlow.h
+++ b/common/include/ControlFlow.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_CONTROLFLOW_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_CONTROLFLOW_H
+#include <cstdint>
+
namespace android {
namespace nn {
namespace operation_if {
diff --git a/common/include/LegacyUtils.h b/common/include/LegacyUtils.h
index 75fabf8..5a6e3bf 100644
--- a/common/include/LegacyUtils.h
+++ b/common/include/LegacyUtils.h
@@ -27,10 +27,10 @@
#include <utility>
#include <vector>
-#include <nnapi/TypeUtils.h>
-#include <nnapi/Types.h>
#include "NeuralNetworks.h"
#include "OperationResolver.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -109,6 +109,15 @@
inline OptionalTimePoint makeDeadline(std::optional<uint64_t> duration) {
return duration.has_value() ? std::make_optional(makeDeadline(*duration)) : OptionalTimePoint{};
}
+// Overload for AIDL-style deadlines: converts an int64_t duration into an
+// OptionalTimePoint relative to now. A value of -1 means the caller omitted the
+// deadline; any other negative value is a caller bug (CHECK-fails).
+inline OptionalTimePoint makeDeadline(int64_t duration) {
+    // NN AIDL interface defines -1 to indicate that the duration has been omitted and forbids all
+    // other negative values.
+    CHECK_GE(duration, -1);
+    if (duration == -1) {
+        return OptionalTimePoint{};
+    }
+    return makeDeadline(static_cast<uint64_t>(duration));
+}
// Returns true if the deadline has passed. Returns false if either the deadline
// has not been exceeded or if the deadline is not present.
diff --git a/common/include/ValidateHal.h b/common/include/ValidateHal.h
index 86d9520..3d811e0 100644
--- a/common/include/ValidateHal.h
+++ b/common/include/ValidateHal.h
@@ -20,9 +20,9 @@
#include <set>
#include <tuple>
-#include <nnapi/TypeUtils.h>
-#include <nnapi/Validation.h>
#include "HalInterfaces.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Validation.h"
namespace android {
namespace nn {
diff --git a/common/include/nnapi/TypeUtils.h b/common/include/nnapi/TypeUtils.h
index 5f58c71..a07504b 100644
--- a/common/include/nnapi/TypeUtils.h
+++ b/common/include/nnapi/TypeUtils.h
@@ -37,6 +37,7 @@
V1_1,
V1_2,
V1_3,
+ AIDL_UNSTABLE,
LATEST = V1_3,
};
diff --git a/common/include/nnapi/Types.h b/common/include/nnapi/Types.h
index 3fb786d..753f162 100644
--- a/common/include/nnapi/Types.h
+++ b/common/include/nnapi/Types.h
@@ -353,7 +353,7 @@
OptionalDuration timeInDriver;
};
-enum class Version { ANDROID_OC_MR1, ANDROID_P, ANDROID_Q, ANDROID_R, CURRENT_RUNTIME };
+enum class Version { ANDROID_OC_MR1, ANDROID_P, ANDROID_Q, ANDROID_R, ANDROID_S, CURRENT_RUNTIME };
} // namespace android::nn
diff --git a/common/include/nnapi/Validation.h b/common/include/nnapi/Validation.h
index 493763c..1ae745e 100644
--- a/common/include/nnapi/Validation.h
+++ b/common/include/nnapi/Validation.h
@@ -57,7 +57,8 @@
Result<Version> validate(const std::vector<BufferRole>& bufferRoles);
// Validate request applied to model.
-Result<Version> validateRequestForModel(const Request& request, const Model& model);
+Result<Version> validateRequestForModel(const Request& request, const Model& model,
+ bool allowUnspecifiedOutput = true);
// Validate memory descriptor.
enum class IOType { INPUT, OUTPUT };
diff --git a/driver/sample/Android.bp b/driver/sample/Android.bp
index bb535d8..ae7cbbc 100644
--- a/driver/sample/Android.bp
+++ b/driver/sample/Android.bp
@@ -29,6 +29,7 @@
"libneuralnetworks_headers",
],
shared_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
"[email protected]",
"[email protected]",
"[email protected]",
diff --git a/driver/sample_aidl/Android.bp b/driver/sample_aidl/Android.bp
new file mode 100644
index 0000000..6db6603
--- /dev/null
+++ b/driver/sample_aidl/Android.bp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_defaults {
+ name: "NeuralNetworksSampleDriverAidl_defaults",
+ defaults: ["neuralnetworks_defaults"],
+ // b/109953668, disable OpenMP
+ // openmp: true,
+ srcs: [
+ "SampleDriver.cpp",
+ "SampleDriverFull.cpp",
+ "SampleDriverPartial.cpp",
+ "SampleDriverUtils.cpp",
+ ],
+ header_libs: [
+ "libneuralnetworks_headers",
+ ],
+ shared_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
+ "[email protected]",
+ "[email protected]",
+ "[email protected]",
+ "[email protected]",
+ "[email protected]",
+ "[email protected]",
+ "libbase",
+ "libbinder_ndk",
+ "libcutils",
+ "libdl",
+ "libfmq",
+ "libhardware",
+ "libhidlbase",
+ "libhidlmemory",
+ "liblog",
+ "libnativewindow",
+ "libtextclassifier_hash",
+ "libutils",
+ ],
+ static_libs: [
+ "libneuralnetworks_common",
+ "neuralnetworks_utils_hal_aidl",
+ "neuralnetworks_utils_hal_common",
+ ],
+}
+
+cc_defaults {
+ name: "NeuralNetworksSampleDriverAidl_server_defaults",
+ defaults: ["NeuralNetworksSampleDriverAidl_defaults"],
+ relative_install_path: "hw",
+ proprietary: true,
+}
+
+cc_binary {
+ name: "android.hardware.neuralnetworks-service-sample-all",
+ srcs: ["SampleDriverAll.cpp"],
+ defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"],
+ init_rc: ["config/android.hardware.neuralnetworks-service-sample-all.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-all.xml"],
+}
+
+cc_binary {
+ name: "android.hardware.neuralnetworks-service-sample-float-fast",
+ srcs: ["SampleDriverFloatFast.cpp"],
+ defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"],
+ init_rc: ["config/android.hardware.neuralnetworks-service-sample-float-fast.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-float-fast.xml"],
+}
+
+cc_binary {
+ name: "android.hardware.neuralnetworks-service-sample-float-slow",
+ srcs: ["SampleDriverFloatSlow.cpp"],
+ defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"],
+ init_rc: ["config/android.hardware.neuralnetworks-service-sample-float-slow.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-float-slow.xml"],
+}
+
+cc_binary {
+ name: "android.hardware.neuralnetworks-service-sample-quant",
+ srcs: ["SampleDriverQuant.cpp"],
+ defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"],
+ init_rc: ["config/android.hardware.neuralnetworks-service-sample-quant.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-quant.xml"],
+}
+
+cc_binary {
+ name: "android.hardware.neuralnetworks-service-sample-minimal",
+ srcs: ["SampleDriverMinimal.cpp"],
+ defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"],
+ init_rc: ["config/android.hardware.neuralnetworks-service-sample-minimal.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-minimal.xml"],
+}
diff --git a/driver/sample_aidl/SampleDriver.cpp b/driver/sample_aidl/SampleDriver.cpp
new file mode 100644
index 0000000..689d000
--- /dev/null
+++ b/driver/sample_aidl/SampleDriver.cpp
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriver"
+
+#include "SampleDriver.h"
+
+#include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_interface_utils.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <nnapi/Result.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+
+#include <algorithm>
+#include <chrono>
+#include <map>
+#include <memory>
+#include <optional>
+#include <set>
+#include <string>
+#include <thread>
+#include <tuple>
+#include <utility>
+#include <variant>
+#include <vector>
+
+#include "AidlBufferTracker.h"
+#include "AidlHalUtils.h"
+#include "CpuExecutor.h"
+#include "SampleDriverUtils.h"
+#include "Tracing.h"
+#include "Utils.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+namespace {
+
+// Current steady-clock time; used to measure driver and device execution timing.
+auto now() {
+    return std::chrono::steady_clock::now();
+}
+
+// Elapsed time between two time points in nanoseconds, the unit used by aidl_hal::Timing.
+int64_t nanosecondsDuration(TimePoint end, TimePoint start) {
+    return std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
+}
+
+// The AIDL HAL uses -1 in Timing fields to indicate that no measurement is available.
+constexpr aidl_hal::Timing kNoTiming = {.timeOnDevice = -1, .timeInDriver = -1};
+
+} // namespace
+
+// Reports the driver's version string. The sample driver returns a fixed placeholder.
+ndk::ScopedAStatus SampleDriver::getVersionString(std::string* versionString) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
+                 "SampleDriver::getVersionString");
+    *versionString = "JUST_AN_EXAMPLE";
+    return ndk::ScopedAStatus::ok();
+}
+
+// Reports the device type. The sample driver executes on the CPU (see CpuExecutor usage below).
+ndk::ScopedAStatus SampleDriver::getType(aidl_hal::DeviceType* deviceType) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getType");
+    *deviceType = aidl_hal::DeviceType::CPU;
+    return ndk::ScopedAStatus::ok();
+}
+
+// Reports vendor extensions. The sample driver supports none, so it returns an empty list.
+ndk::ScopedAStatus SampleDriver::getSupportedExtensions(
+        std::vector<aidl_hal::Extension>* supportedExtensions) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
+                 "SampleDriver::getSupportedExtensions");
+    *supportedExtensions = {/* No extensions. */};
+    return ndk::ScopedAStatus::ok();
+}
+
+// Reports how many model/data cache files the driver needs for compilation caching.
+// Both counts are zero: this driver does not support caching (see prepareModelFromCache,
+// which always fails).
+ndk::ScopedAStatus SampleDriver::getNumberOfCacheFilesNeeded(
+        aidl_hal::NumberOfCacheFiles* numberOfCacheFiles) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
+                 "SampleDriver::getNumberOfCacheFilesNeeded");
+    // Set both numbers to be 0 for cache not supported.
+    numberOfCacheFiles->numDataCache = 0;
+    numberOfCacheFiles->numModelCache = 0;
+    return ndk::ScopedAStatus::ok();
+}
+
+// Asynchronously prepares a model for execution; the result is delivered through `callback`
+// by prepareModelBase. Cache-related parameters are ignored because this driver reports
+// zero cache files needed.
+ndk::ScopedAStatus SampleDriver::prepareModel(
+        const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference,
+        aidl_hal::Priority priority, int64_t deadline,
+        const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/,
+        const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/,
+        const std::vector<uint8_t>& /*token*/,
+        const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel");
+    // Deep-copy the model so prepareModelBase can take ownership — presumably because
+    // aidl_hal::Model holds file descriptors and is not trivially copyable; TODO confirm
+    // against aidl_hal::utils::clone.
+    auto copiedModel = aidl_hal::utils::clone(model);
+    if (!copiedModel.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, copiedModel.error().message);
+    }
+    return prepareModelBase(std::move(copiedModel).value(), this, preference, priority, deadline,
+                            callback);
+}
+
+// Always fails: compilation caching is unsupported (getNumberOfCacheFilesNeeded reports 0/0).
+// The failure is reported both through the callback and through the returned status.
+ndk::ScopedAStatus SampleDriver::prepareModelFromCache(
+        int64_t /*deadline*/, const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/,
+        const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/,
+        const std::vector<uint8_t>& /*token*/,
+        const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
+                 "SampleDriver::prepareModelFromCache");
+    notify(callback, aidl_hal::ErrorStatus::GENERAL_FAILURE, nullptr);
+    return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE,
+                     "Caching is not supported in the sample driver.");
+}
+
+// Safely downcast an IPreparedModel object to SamplePreparedModel.
+// This function will return nullptr if the IPreparedModel object is not originated from the sample
+// driver process.
+static const SamplePreparedModel* castToSamplePreparedModel(
+        const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) {
+    // isRemote() distinguishes a binder proxy (another process) from a local implementation.
+    if (preparedModel->isRemote()) {
+        return nullptr;
+    } else {
+        // This static_cast is safe because SamplePreparedModel is the only class that implements
+        // the IPreparedModel interface in the sample driver process.
+        return static_cast<const SamplePreparedModel*>(preparedModel.get());
+    }
+}
+
+// Allocates a driver-managed device buffer described by `desc` for use with the given
+// prepared models and roles. On success, fills `buffer` with a SampleBuffer and the token
+// registered in mBufferTracker. Fails for extension operand types and for operands whose
+// size cannot be computed (dynamic shapes).
+ndk::ScopedAStatus SampleDriver::allocate(
+        const aidl_hal::BufferDesc& desc,
+        const std::vector<aidl_hal::IPreparedModelParcel>& halPreparedModels,
+        const std::vector<aidl_hal::BufferRole>& inputRoles,
+        const std::vector<aidl_hal::BufferRole>& outputRoles, aidl_hal::DeviceBuffer* buffer) {
+    VLOG(DRIVER) << "SampleDriver::allocate";
+    // Fetches the model backing a prepared model, but only for models prepared by this
+    // process (castToSamplePreparedModel rejects remote binder proxies).
+    constexpr auto getModel = [](const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel)
+            -> const aidl_hal::Model* {
+        const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel);
+        if (samplePreparedModel == nullptr) {
+            LOG(ERROR) << "SampleDriver::allocate -- unknown remote IPreparedModel.";
+            return nullptr;
+        }
+        return samplePreparedModel->getModel();
+    };
+
+    // Unwrap the parcels so validateMemoryDesc can work on plain IPreparedModel pointers.
+    std::vector<std::shared_ptr<aidl_hal::IPreparedModel>> preparedModels;
+    preparedModels.reserve(halPreparedModels.size());
+    for (const auto& halPreparedModelParcel : halPreparedModels) {
+        preparedModels.push_back(halPreparedModelParcel.preparedModel);
+    }
+    // Validation also derives the combined operand (type/dimensions) the buffer must hold
+    // and the set of (preparedModel, ioType, index) roles it may serve.
+    std::set<AidlHalPreparedModelRole> roles;
+    aidl_hal::Operand operand;
+    if (!validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles, getModel, &roles,
+                            &operand)) {
+        LOG(ERROR) << "SampleDriver::allocate -- validation failed.";
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         "SampleDriver::allocate -- validation failed.");
+    }
+
+    if (isExtensionOperandType(operand.type)) {
+        LOG(ERROR) << "SampleDriver::allocate -- does not support extension type.";
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE,
+                         "SampleDriver::allocate -- does not support extension type.");
+    }
+
+    // TODO(xusongw): Support allocating buffers with unknown dimensions or rank.
+
+    // An operand obtained from validateMemoryDesc is guaranteed to be representable in canonical
+    // types.
+    uint32_t size = nonExtensionOperandSizeOfData(convert(operand.type).value(),
+                                                  toUnsigned(operand.dimensions).value());
+    VLOG(DRIVER) << "SampleDriver::allocate -- type = " << toString(operand.type)
+                 << ", dimensions = " << toString(operand.dimensions) << ", size = " << size;
+    // size == 0 means the operand has at least one unknown dimension — rejected per the TODO above.
+    if (size == 0) {
+        LOG(ERROR) << "SampleDriver::allocate -- does not support dynamic output shape.";
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE,
+                         "SampleDriver::allocate -- does not support dynamic output shape.");
+    }
+
+    auto bufferWrapper =
+            AidlManagedBuffer::create(size, std::move(roles), convert(operand).value());
+    if (bufferWrapper == nullptr) {
+        LOG(ERROR) << "SampleDriver::allocate -- not enough memory.";
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE,
+                         "SampleDriver::allocate -- not enough memory.");
+    }
+
+    // Register the buffer with the tracker; the returned token is what clients later pass in
+    // Request::MemoryDomainToken pools (see createRunTimePoolInfos).
+    auto token = mBufferTracker->add(bufferWrapper);
+    if (token == nullptr) {
+        LOG(ERROR) << "SampleDriver::allocate -- AidlBufferTracker returned invalid token.";
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE,
+                         "SampleDriver::allocate -- AidlBufferTracker returned invalid token.");
+    }
+
+    // Read the token value before std::move(token) below.
+    const uint32_t tokenValue = token->get();
+    std::shared_ptr<SampleBuffer> sampleBuffer =
+            ndk::SharedRefBase::make<SampleBuffer>(std::move(bufferWrapper), std::move(token));
+    VLOG(DRIVER) << "SampleDriver::allocate -- successfully allocates the requested memory";
+    buffer->buffer = std::move(sampleBuffer);
+    buffer->token = tokenValue;
+    return ndk::ScopedAStatus::ok();
+}
+
+// Registers this driver with the service manager as "<descriptor>/<mName>" and joins the
+// binder thread pool. Blocks for the lifetime of the service; returns 1 in all cases because
+// reaching the return at all (registration failure, or the thread pool unexpectedly exiting)
+// is an error for a long-running service process.
+int SampleDriver::run() {
+    ABinderProcess_setThreadPoolMaxThreadCount(4);
+    const std::string name = std::string(SampleDriver::descriptor) + "/" + mName;
+    const binder_status_t status = AServiceManager_addService(this->asBinder().get(), name.c_str());
+    if (status != STATUS_OK) {
+        return 1;
+    }
+    ABinderProcess_joinThreadPool();
+    return 1;
+}
+
+// Copies the entire contents of srcPool into dstPool and flushes the destination.
+// Precondition (CHECK-enforced): both pools are mapped and have identical sizes.
+static void copyRunTimePoolInfos(const RunTimePoolInfo& srcPool, const RunTimePoolInfo& dstPool) {
+    CHECK(srcPool.getBuffer() != nullptr);
+    CHECK(dstPool.getBuffer() != nullptr);
+    CHECK(srcPool.getSize() == dstPool.getSize());
+    std::copy(srcPool.getBuffer(), srcPool.getBuffer() + srcPool.getSize(), dstPool.getBuffer());
+    // flush() presumably makes the written data visible to other mappers of the memory —
+    // TODO confirm against RunTimePoolInfo.
+    dstPool.flush();
+}
+
+// Copies this device buffer's contents out to the caller-provided memory `dst`.
+// Fails if dst cannot be converted/mapped, or if the managed buffer rejects the copy
+// (e.g. size mismatch or uninitialized contents — see AidlManagedBuffer::validateCopyTo).
+ndk::ScopedAStatus SampleBuffer::copyTo(const aidl_hal::Memory& dst) {
+    const auto canonicalMemory = convert(dst);
+    if (!canonicalMemory.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalMemory.error().message);
+    }
+    const auto dstPool = RunTimePoolInfo::createFromMemory(canonicalMemory.value());
+    if (!dstPool.has_value()) {
+        LOG(ERROR) << "SampleBuffer::copyTo -- unable to map dst memory.";
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE,
+                         "SampleBuffer::copyTo -- unable to map dst memory.");
+    }
+    const auto validationStatus =
+            aidl_hal::utils::convert(kBuffer->validateCopyTo(dstPool->getSize())).value();
+    if (validationStatus != aidl_hal::ErrorStatus::NONE) {
+        return toAStatus(validationStatus);
+    }
+    const auto srcPool = kBuffer->createRunTimePoolInfo();
+    copyRunTimePoolInfos(srcPool, dstPool.value());
+    return ndk::ScopedAStatus::ok();
+}
+
+// Core of SampleBuffer::copyFrom: maps `src`, validates the copy against the managed
+// buffer's roles/dimensions, and copies the data in. Returns an ErrorStatus instead of a
+// binder status so the caller can also update the buffer's initialized flag on failure.
+static aidl_hal::ErrorStatus copyFromInternal(
+        const aidl_hal::Memory& src, const std::vector<uint32_t>& dimensions,
+        const std::shared_ptr<AidlManagedBuffer>& bufferWrapper) {
+    CHECK(bufferWrapper != nullptr);
+    const auto canonicalMemory = convert(src);
+    if (!canonicalMemory.has_value()) {
+        return aidl_hal::ErrorStatus::INVALID_ARGUMENT;
+    }
+    const auto srcPool = RunTimePoolInfo::createFromMemory(canonicalMemory.value());
+    if (!srcPool.has_value()) {
+        LOG(ERROR) << "SampleBuffer::copyFrom -- unable to map src memory.";
+        return aidl_hal::ErrorStatus::GENERAL_FAILURE;
+    }
+    const auto validationStatus = aidl_hal::utils::convert(bufferWrapper->validateCopyFrom(
+                                                                   dimensions, srcPool->getSize()))
+                                          .value();
+    if (validationStatus != aidl_hal::ErrorStatus::NONE) {
+        return validationStatus;
+    }
+    const auto dstPool = bufferWrapper->createRunTimePoolInfo();
+    copyRunTimePoolInfos(srcPool.value(), dstPool);
+    return aidl_hal::ErrorStatus::NONE;
+}
+
+// Copies caller-provided memory `src` into this device buffer. On success the buffer's
+// dimensions are updated and it is marked initialized; on failure it is explicitly marked
+// uninitialized so a later execution or copyTo cannot read stale/partial data.
+ndk::ScopedAStatus SampleBuffer::copyFrom(const aidl_hal::Memory& src,
+                                          const std::vector<int32_t>& dimensions) {
+    // AIDL carries dimensions as int32_t; reject negative values via toUnsigned.
+    const auto unsignedDimensions = toUnsigned(dimensions);
+    if (!unsignedDimensions.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         unsignedDimensions.error().message);
+    }
+    const auto status = copyFromInternal(src, unsignedDimensions.value(), kBuffer);
+    if (status != aidl_hal::ErrorStatus::NONE) {
+        kBuffer->setInitialized(false);
+        return toAStatus(status);
+    }
+    kBuffer->updateDimensions(unsignedDimensions.value());
+    kBuffer->setInitialized(true);
+    return ndk::ScopedAStatus::ok();
+}
+
+// Maps the model's constant-data pools (mModel.pools) into mPoolInfos for use by the
+// executor. Returns false if the pools cannot be converted to canonical form or mapped.
+bool SamplePreparedModel::initialize() {
+    const auto canonicalPools = convert(mModel.pools);
+    if (!canonicalPools.has_value()) {
+        return false;
+    }
+    return setRunTimePoolInfosFromCanonicalMemories(&mPoolInfos, canonicalPools.value());
+}
+
+// Maps each pool of `request` into a RunTimePoolInfo the executor can consume:
+// - A Memory pool is mapped directly; its slot in the returned bufferWrappers is nullptr.
+// - A MemoryDomainToken pool is resolved through the driver's AidlBufferTracker (the token
+//   comes from SampleDriver::allocate) and validated against the request; the wrapper is
+//   returned alongside so it stays alive for the duration of the execution.
+// Returns {status, requestPoolInfos, bufferWrappers}; on failure the vectors are empty.
+static std::tuple<aidl_hal::ErrorStatus, std::vector<RunTimePoolInfo>,
+                  std::vector<std::shared_ptr<AidlManagedBuffer>>>
+createRunTimePoolInfos(const Request& request, const SampleDriver& driver,
+                       const SamplePreparedModel* preparedModel) {
+    std::vector<RunTimePoolInfo> requestPoolInfos;
+    std::vector<std::shared_ptr<AidlManagedBuffer>> bufferWrappers;
+    requestPoolInfos.reserve(request.pools.size());
+    bufferWrappers.reserve(request.pools.size());
+    for (uint32_t i = 0; i < request.pools.size(); i++) {
+        const auto& pool = request.pools[i];
+        if (const auto* memory = std::get_if<Memory>(&pool)) {
+            auto buffer = RunTimePoolInfo::createFromMemory(*memory);
+            if (!buffer.has_value()) {
+                // Fixed: log message previously named a stale HIDL-era helper
+                // ("createRuntimeMemoriesFromMemoryPools").
+                LOG(ERROR) << "createRunTimePoolInfos -- could not map pools";
+                return {aidl_hal::ErrorStatus::GENERAL_FAILURE, {}, {}};
+            }
+            requestPoolInfos.push_back(std::move(*buffer));
+            bufferWrappers.push_back(nullptr);
+        } else if (const auto* token = std::get_if<Request::MemoryDomainToken>(&pool)) {
+            auto bufferWrapper = driver.getBufferTracker()->get(static_cast<uint32_t>(*token));
+            if (bufferWrapper == nullptr) {
+                return {aidl_hal::ErrorStatus::INVALID_ARGUMENT, {}, {}};
+            }
+            const auto validationStatus =
+                    aidl_hal::utils::convert(
+                            bufferWrapper->validateRequest(i, request, preparedModel))
+                            .value();
+            if (validationStatus != aidl_hal::ErrorStatus::NONE) {
+                return {validationStatus, {}, {}};
+            }
+            requestPoolInfos.push_back(bufferWrapper->createRunTimePoolInfo());
+            bufferWrappers.push_back(std::move(bufferWrapper));
+        } else {
+            // If the pool is not a Memory or a token, the input is invalid.
+            return {aidl_hal::ErrorStatus::INVALID_ARGUMENT, {}, {}};
+        }
+    }
+    return {aidl_hal::ErrorStatus::NONE, std::move(requestPoolInfos), std::move(bufferWrappers)};
+}
+
+// After an execution, updates metadata on any outputs that live in driver-managed device
+// memories (MemoryDomainToken pools): records the executed output dimensions and marks the
+// buffers initialized. Marking initialized is done in a second pass so that buffers are only
+// flagged once every dimension update has succeeded. Returns GENERAL_FAILURE if a dimension
+// update fails or if an insufficient-size result implicates a device memory.
+static aidl_hal::ErrorStatus updateDeviceMemories(
+        aidl_hal::ErrorStatus status, const Request& request,
+        const std::vector<std::shared_ptr<AidlManagedBuffer>>& bufferWrappers,
+        const std::vector<aidl_hal::OutputShape>& outputShapes) {
+    if (status == aidl_hal::ErrorStatus::NONE) {
+        for (uint32_t i = 0; i < request.outputs.size(); i++) {
+            const uint32_t poolIndex = request.outputs[i].location.poolIndex;
+            const auto& pool = request.pools[poolIndex];
+            if (std::holds_alternative<Request::MemoryDomainToken>(pool)) {
+                const auto unsignedDimensions = toUnsigned(outputShapes[i].dimensions).value();
+                if (!bufferWrappers[poolIndex]->updateDimensions(unsignedDimensions)) {
+                    return aidl_hal::ErrorStatus::GENERAL_FAILURE;
+                }
+            }
+        }
+        for (uint32_t i = 0; i < request.outputs.size(); i++) {
+            const uint32_t poolIndex = request.outputs[i].location.poolIndex;
+            const auto& pool = request.pools[poolIndex];
+            if (std::holds_alternative<Request::MemoryDomainToken>(pool)) {
+                bufferWrappers[poolIndex]->setInitialized(true);
+            }
+        }
+    } else if (status == aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        // If CpuExecutor reports OUTPUT_INSUFFICIENT_SIZE on a device memory, this is because the
+        // dimensions of the device memory are incorrectly specified. The driver should return
+        // GENERAL_FAILURE instead in this case.
+        for (uint32_t i = 0; i < request.outputs.size(); i++) {
+            const uint32_t poolIndex = request.outputs[i].location.poolIndex;
+            const auto& pool = request.pools[poolIndex];
+            if (std::holds_alternative<Request::MemoryDomainToken>(pool)) {
+                if (!outputShapes[i].isSufficient) {
+                    LOG(ERROR) << "Invalid dimensions for output " << i
+                               << ": actual shape = " << toString(outputShapes[i].dimensions);
+                    return aidl_hal::ErrorStatus::GENERAL_FAILURE;
+                }
+            }
+        }
+    }
+    return aidl_hal::ErrorStatus::NONE;
+}
+
+// Runs the prepared model synchronously on the CPU.
+// Flow: validate request against model -> check deadline/timeout arguments -> map request
+// pools -> run CpuExecutor -> update device-memory metadata -> fill executionResult
+// (output shapes, sufficiency, optional timing). OUTPUT_INSUFFICIENT_SIZE is not an early
+// return because the caller still needs the actual output shapes in executionResult.
+ndk::ScopedAStatus SamplePreparedModel::executeSynchronously(
+        const aidl_hal::Request& halRequest, bool measureTiming, int64_t halDeadline,
+        int64_t loopTimeoutDuration, aidl_hal::ExecutionResult* executionResult) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
+                 "SampleDriver::executeSynchronously");
+    VLOG(DRIVER) << "executeSynchronously(" << SHOW_IF_DEBUG(halRequest.toString()) << ")";
+
+    TimePoint driverStart, driverEnd, deviceStart, deviceEnd;
+    if (measureTiming) driverStart = now();
+
+    const auto model = convert(mModel).value();
+
+    auto maybeRequest = convert(halRequest);
+    if (!maybeRequest.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, maybeRequest.error().message);
+    }
+    const auto request = std::move(maybeRequest).value();
+
+    const auto validationResult = validateRequestForModel(request, model);
+    if (!validationResult.ok()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, validationResult.error());
+    }
+
+    // -1 means "omitted"; any other negative value is invalid per the AIDL interface.
+    if (halDeadline < -1) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         "Invalid deadline: " + toString(halDeadline));
+    }
+    if (loopTimeoutDuration < -1) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         "Invalid loop timeout duration: " + toString(loopTimeoutDuration));
+    }
+
+    const auto deadline = makeDeadline(halDeadline);
+    if (hasDeadlinePassed(deadline)) {
+        return toAStatus(aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+    }
+
+    NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS,
+                        "SampleDriver::executeSynchronouslyBase");
+    const auto [poolStatus, requestPoolInfos, bufferWrappers] =
+            createRunTimePoolInfos(request, *mDriver, this);
+    if (poolStatus != aidl_hal::ErrorStatus::NONE) {
+        return toAStatus(poolStatus);
+    }
+
+    NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
+                        "SampleDriver::executeSynchronouslyBase");
+    CpuExecutor executor = mDriver->getExecutor();
+    if (loopTimeoutDuration >= 0) {
+        executor.setLoopTimeout(loopTimeoutDuration);
+    }
+    if (deadline.has_value()) {
+        executor.setDeadline(*deadline);
+    }
+    if (measureTiming) deviceStart = now();
+    int n = executor.run(model, request, mPoolInfos, requestPoolInfos);
+    if (measureTiming) deviceEnd = now();
+    VLOG(DRIVER) << "executor.run returned " << n;
+    aidl_hal::ErrorStatus executionStatus = convertResultCodeToAidlErrorStatus(n);
+    // Only NONE and OUTPUT_INSUFFICIENT_SIZE proceed — the latter still reports shapes below.
+    if (executionStatus != aidl_hal::ErrorStatus::NONE &&
+        executionStatus != aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        return toAStatus(executionStatus);
+    }
+    auto outputShapes = aidl_hal::utils::convert(executor.getOutputShapes()).value();
+
+    // Update device memory metadata.
+    const aidl_hal::ErrorStatus updateStatus =
+            updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes);
+    if (updateStatus != aidl_hal::ErrorStatus::NONE) {
+        return toAStatus(updateStatus);
+    }
+
+    executionResult->outputSufficientSize =
+            executionStatus != aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+    executionResult->outputShapes = std::move(outputShapes);
+    // Timing defaults to "not measured" (-1/-1) and is only filled in on full success.
+    executionResult->timing = kNoTiming;
+    if (measureTiming && executionStatus == aidl_hal::ErrorStatus::NONE) {
+        driverEnd = now();
+        aidl_hal::Timing timing = {.timeOnDevice = nanosecondsDuration(deviceEnd, deviceStart),
+                                   .timeInDriver = nanosecondsDuration(driverEnd, driverStart)};
+        VLOG(DRIVER) << "executeSynchronously timing = " << timing.toString();
+
+        executionResult->timing = timing;
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+// The sample driver will finish the execution and then return.
+// Because the execution is already complete by the time this returns, no real sync fence is
+// needed: an empty ScopedFileDescriptor is returned, and the callback already holds the
+// final status and timing.
+ndk::ScopedAStatus SamplePreparedModel::executeFenced(
+        const aidl_hal::Request& halRequest, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+        bool measureTiming, int64_t halDeadline, int64_t loopTimeoutDuration, int64_t duration,
+        ndk::ScopedFileDescriptor* syncFence,
+        std::shared_ptr<aidl_hal::IFencedExecutionCallback>* callback) {
+    NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
+                 "SamplePreparedModel::executeFenced");
+    VLOG(DRIVER) << "executeFenced(" << SHOW_IF_DEBUG(halRequest.toString()) << ")";
+
+    TimePoint driverStart, driverEnd, deviceStart, deviceEnd;
+    if (measureTiming) driverStart = now();
+
+    const auto model = convert(mModel).value();
+
+    auto maybeRequest = convert(halRequest);
+    if (!maybeRequest.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, maybeRequest.error().message);
+    }
+    const auto request = std::move(maybeRequest).value();
+
+    // Unlike executeSynchronously, fenced execution forbids outputs of unspecified shape.
+    const auto validationResult =
+            validateRequestForModel(request, model, /*allowUnspecifiedOutput=*/false);
+    if (!validationResult.ok()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, validationResult.error());
+    }
+
+    // -1 means "omitted"; any other negative value is invalid per the AIDL interface.
+    if (halDeadline < -1) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         "Invalid deadline: " + toString(halDeadline));
+    }
+    if (loopTimeoutDuration < -1) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         "Invalid loop timeout duration: " + toString(loopTimeoutDuration));
+    }
+    if (duration < -1) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+                         "Invalid fenced execution duration: " + toString(duration));
+    }
+
+    const auto deadline = makeDeadline(halDeadline);
+    if (hasDeadlinePassed(deadline)) {
+        return toAStatus(aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+    }
+
+    // Wait for the dependent events to signal
+    for (const auto& fenceHandle : waitFor) {
+        int syncFenceFd = fenceHandle.get();
+        // Blocks indefinitely (timeout -1) until the fence signals.
+        if (syncWait(syncFenceFd, -1) != FenceState::SIGNALED) {
+            LOG(ERROR) << "syncWait failed";
+            return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, "syncWait failed");
+        }
+    }
+
+    // Update deadline if the timeout duration is closer than the deadline.
+    auto closestDeadline = deadline;
+    if (duration >= 0) {
+        // `duration` is measured from now, i.e. from after the fences have signaled.
+        const auto timeoutDurationDeadline = makeDeadline(duration);
+        if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) {
+            closestDeadline = timeoutDurationDeadline;
+        }
+    }
+
+    // Second timestamp so timingAfterFence can exclude the fence-wait time above.
+    TimePoint driverStartAfterFence;
+    if (measureTiming) driverStartAfterFence = now();
+
+    NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS,
+                        "SamplePreparedModel::executeFenced");
+    const auto [poolStatus, requestPoolInfos, bufferWrappers] =
+            createRunTimePoolInfos(request, *mDriver, this);
+    if (poolStatus != aidl_hal::ErrorStatus::NONE) {
+        return toAStatus(poolStatus);
+    }
+
+    NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
+                        "SamplePreparedModel::executeFenced");
+    CpuExecutor executor = mDriver->getExecutor();
+    if (loopTimeoutDuration >= 0) {
+        executor.setLoopTimeout(loopTimeoutDuration);
+    }
+    if (closestDeadline.has_value()) {
+        executor.setDeadline(*closestDeadline);
+    }
+    if (measureTiming) deviceStart = now();
+    int n = executor.run(model, request, mPoolInfos, requestPoolInfos);
+    if (measureTiming) deviceEnd = now();
+    VLOG(DRIVER) << "executor.run returned " << n;
+    aidl_hal::ErrorStatus executionStatus = convertResultCodeToAidlErrorStatus(n);
+    if (executionStatus != aidl_hal::ErrorStatus::NONE) {
+        return toAStatus(executionStatus);
+    }
+
+    // Set output memories to the initialized state.
+    // NOTE(review): this condition is always true — any non-NONE status already returned above.
+    if (executionStatus == aidl_hal::ErrorStatus::NONE) {
+        for (const auto& output : request.outputs) {
+            const uint32_t poolIndex = output.location.poolIndex;
+            const auto& pool = request.pools[poolIndex];
+            if (std::holds_alternative<Request::MemoryDomainToken>(pool)) {
+                bufferWrappers[poolIndex]->setInitialized(true);
+            }
+        }
+    }
+
+    aidl_hal::Timing timingSinceLaunch = kNoTiming;
+    aidl_hal::Timing timingAfterFence = kNoTiming;
+    if (measureTiming) {
+        driverEnd = now();
+        timingSinceLaunch = {.timeOnDevice = nanosecondsDuration(deviceEnd, deviceStart),
+                             .timeInDriver = nanosecondsDuration(driverEnd, driverStart)};
+        timingAfterFence = {.timeOnDevice = nanosecondsDuration(deviceEnd, deviceStart),
+                            .timeInDriver = nanosecondsDuration(driverEnd, driverStartAfterFence)};
+        VLOG(DRIVER) << "executeFenced timingSinceLaunch = " << timingSinceLaunch.toString();
+        VLOG(DRIVER) << "executeFenced timingAfterFence = " << timingAfterFence.toString();
+    }
+
+    *callback = ndk::SharedRefBase::make<SampleFencedExecutionCallback>(
+            timingSinceLaunch, timingAfterFence, executionStatus);
+    // Empty fence: the execution has already completed synchronously.
+    *syncFence = ndk::ScopedFileDescriptor();
+    return ndk::ScopedAStatus::ok();
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
diff --git a/driver/sample_aidl/SampleDriver.h b/driver/sample_aidl/SampleDriver.h
new file mode 100644
index 0000000..ba8a67c
--- /dev/null
+++ b/driver/sample_aidl/SampleDriver.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_H
+#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_H
+
+#include <android/binder_auto_utils.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "AidlBufferTracker.h"
+#include "AidlHalInterfaces.h"
+#include "CpuExecutor.h"
+#include "NeuralNetworks.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Manages the data buffer for an operand.
+class SampleBuffer : public aidl_hal::BnBuffer {
+   public:
+    // Wraps a device-managed operand buffer behind the aidl_hal::IBuffer
+    // interface. Both arguments must be non-null (enforced by CHECK).
+    SampleBuffer(std::shared_ptr<AidlManagedBuffer> buffer,
+                 std::unique_ptr<AidlBufferTracker::Token> token)
+        : kBuffer(std::move(buffer)), kToken(std::move(token)) {
+        CHECK(kBuffer != nullptr);
+        CHECK(kToken != nullptr);
+    }
+    // IBuffer methods (defined out of line).
+    ndk::ScopedAStatus copyFrom(const aidl_hal::Memory& src,
+                                const std::vector<int32_t>& dimensions) override;
+    ndk::ScopedAStatus copyTo(const aidl_hal::Memory& dst) override;
+
+   private:
+    const std::shared_ptr<AidlManagedBuffer> kBuffer;
+    // NOTE(review): presumably keeps this buffer registered with the driver's
+    // AidlBufferTracker until destruction — confirm in AidlBufferTracker.h.
+    const std::unique_ptr<AidlBufferTracker::Token> kToken;
+};
+
+// Base class used to create sample drivers for the NN HAL. This class
+// provides some implementation of the more common functions.
+//
+// Since these drivers simulate hardware, they must run the computations
+// on the CPU. An actual driver would not do that.
+class SampleDriver : public aidl_hal::BnDevice {
+   public:
+    // Creates a driver registered under `name`. The operation resolver decides
+    // which operation implementations the CpuExecutor uses; it defaults to the
+    // built-in resolver.
+    SampleDriver(const char* name,
+                 const IOperationResolver* operationResolver = BuiltinOperationResolver::get())
+        : mName(name),
+          mOperationResolver(operationResolver),
+          mBufferTracker(AidlBufferTracker::create()) {
+        android::nn::initVLogMask();
+    }
+    // aidl_hal::IDevice methods (defined out of line).
+    ndk::ScopedAStatus allocate(const aidl_hal::BufferDesc& desc,
+                                const std::vector<aidl_hal::IPreparedModelParcel>& preparedModels,
+                                const std::vector<aidl_hal::BufferRole>& inputRoles,
+                                const std::vector<aidl_hal::BufferRole>& outputRoles,
+                                aidl_hal::DeviceBuffer* buffer) override;
+    ndk::ScopedAStatus getNumberOfCacheFilesNeeded(
+            aidl_hal::NumberOfCacheFiles* numberOfCacheFiles) override;
+    ndk::ScopedAStatus getSupportedExtensions(
+            std::vector<aidl_hal::Extension>* extensions) override;
+    ndk::ScopedAStatus getType(aidl_hal::DeviceType* deviceType) override;
+    ndk::ScopedAStatus getVersionString(std::string* version) override;
+    ndk::ScopedAStatus prepareModel(
+            const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference,
+            aidl_hal::Priority priority, int64_t deadline,
+            const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+            const std::vector<uint8_t>& token,
+            const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) override;
+    ndk::ScopedAStatus prepareModelFromCache(
+            int64_t deadline, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+            const std::vector<uint8_t>& token,
+            const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) override;
+
+    // Starts and runs the driver service. Typically called from main().
+    // This will return only once the service shuts down.
+    int run();
+
+    // Builds a CPU executor backed by this driver's operation resolver.
+    CpuExecutor getExecutor() const { return CpuExecutor(mOperationResolver); }
+    // Tracker shared by all buffers allocated through this driver.
+    const std::shared_ptr<AidlBufferTracker>& getBufferTracker() const { return mBufferTracker; }
+
+   protected:
+    std::string mName;
+    const IOperationResolver* mOperationResolver;
+    const std::shared_ptr<AidlBufferTracker> mBufferTracker;
+};
+
+class SamplePreparedModel : public aidl_hal::BnPreparedModel {
+   public:
+    // Takes ownership of the model. `driver` must outlive this prepared model.
+    // kUserId and kPriority are stored but not otherwise read here; the (void)
+    // casts suppress unused-member warnings.
+    SamplePreparedModel(aidl_hal::Model&& model, const SampleDriver* driver,
+                        aidl_hal::ExecutionPreference preference, uid_t userId,
+                        aidl_hal::Priority priority)
+        : mModel(std::move(model)),
+          mDriver(driver),
+          kPreference(preference),
+          kUserId(userId),
+          kPriority(priority) {
+        (void)kUserId;
+        (void)kPriority;
+    }
+    // One-time setup after construction (defined out of line); returns false on
+    // failure. Presumably populates mPoolInfos — confirm in the .cpp.
+    bool initialize();
+    // aidl_hal::IPreparedModel methods (defined out of line).
+    ndk::ScopedAStatus executeSynchronously(const aidl_hal::Request& request, bool measureTiming,
+                                            int64_t deadline, int64_t loopTimeoutDuration,
+                                            aidl_hal::ExecutionResult* executionResult) override;
+    ndk::ScopedAStatus executeFenced(
+            const aidl_hal::Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+            bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration, int64_t duration,
+            ndk::ScopedFileDescriptor* syncFence,
+            std::shared_ptr<aidl_hal::IFencedExecutionCallback>* callback) override;
+    const aidl_hal::Model* getModel() const { return &mModel; }
+
+   protected:
+    aidl_hal::Model mModel;
+    const SampleDriver* mDriver;
+    std::vector<RunTimePoolInfo> mPoolInfos;
+    const aidl_hal::ExecutionPreference kPreference;
+    const uid_t kUserId;
+    const aidl_hal::Priority kPriority;
+};
+
+// Immutable snapshot of a fenced execution's outcome, returned to the client
+// via IFencedExecutionCallback.
+class SampleFencedExecutionCallback : public aidl_hal::BnFencedExecutionCallback {
+   public:
+    SampleFencedExecutionCallback(aidl_hal::Timing timingSinceLaunch,
+                                  aidl_hal::Timing timingAfterFence, aidl_hal::ErrorStatus error)
+        : kTimingSinceLaunch(timingSinceLaunch),
+          kTimingAfterFence(timingAfterFence),
+          kErrorStatus(error) {}
+    // Copies the stored timing/status into the out-parameters; never fails.
+    ndk::ScopedAStatus getExecutionInfo(aidl_hal::Timing* timingLaunched,
+                                        aidl_hal::Timing* timingFenced,
+                                        aidl_hal::ErrorStatus* errorStatus) override {
+        *timingLaunched = kTimingSinceLaunch;
+        *timingFenced = kTimingAfterFence;
+        *errorStatus = kErrorStatus;
+        return ndk::ScopedAStatus::ok();
+    }
+
+   private:
+    const aidl_hal::Timing kTimingSinceLaunch;
+    const aidl_hal::Timing kTimingAfterFence;
+    const aidl_hal::ErrorStatus kErrorStatus;
+};
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_H
diff --git a/driver/sample_aidl/SampleDriverAll.cpp b/driver/sample_aidl/SampleDriverAll.cpp
new file mode 100644
index 0000000..1efa818
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverAll.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverAll"
+
+#include <android/binder_interface_utils.h>
+
+#include <memory>
+
+#include "SampleDriverFull.h"
+
+using aidl::android::hardware::neuralnetworks::PerformanceInfo;
+using android::nn::sample_driver::SampleDriverFull;
+
+int main() {
+    // Serve the full-capability sample driver with a uniform (1.1, 1.1) profile.
+    const PerformanceInfo kPerf{.execTime = 1.1f, .powerUsage = 1.1f};
+    const auto service = ndk::SharedRefBase::make<SampleDriverFull>("nnapi-sample_all", kPerf);
+    return service->run();
+}
diff --git a/driver/sample_aidl/SampleDriverFloatFast.cpp b/driver/sample_aidl/SampleDriverFloatFast.cpp
new file mode 100644
index 0000000..19b2d7e
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverFloatFast.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverFloatFast"
+
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <hidl/LegacySupport.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include "AidlHalUtils.h"
+#include "SampleDriverPartial.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Partial sample driver that only claims non-extension float32 operations and
+// advertises fast float performance (see getCapabilities).
+class SampleDriverFloatFast : public SampleDriverPartial {
+   public:
+    SampleDriverFloatFast() : SampleDriverPartial("nnapi-sample_float_fast") {}
+    ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override;
+
+   private:
+    // Supports an op iff it is non-extension and its first input is TENSOR_FLOAT32.
+    std::vector<bool> getSupportedOperationsImpl(const Model& model) const override;
+};
+
+ndk::ScopedAStatus SampleDriverFloatFast::getCapabilities(aidl_hal::Capabilities* capabilities) {
+    android::nn::initVLogMask();
+    VLOG(DRIVER) << "getCapabilities()";
+
+    // Fast-float profile: relaxed-precision and float32 entries are reported as
+    // cheaper than the 1.0/1.0 baseline used for everything else.
+    const aidl_hal::PerformanceInfo kRelaxedPerf = {.execTime = 0.7f, .powerUsage = 1.1f};
+    const aidl_hal::PerformanceInfo kFloatPerf = {.execTime = 0.8f, .powerUsage = 1.2f};
+    const aidl_hal::PerformanceInfo kBasePerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+    *capabilities = {.relaxedFloat32toFloat16PerformanceScalar = kRelaxedPerf,
+                     .relaxedFloat32toFloat16PerformanceTensor = kRelaxedPerf,
+                     .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f}),
+                     .ifPerformance = kBasePerf,
+                     .whilePerformance = kBasePerf};
+    update(&capabilities->operandPerformance, aidl_hal::OperandType::TENSOR_FLOAT32, kFloatPerf);
+    update(&capabilities->operandPerformance, aidl_hal::OperandType::FLOAT32, kFloatPerf);
+
+    return ndk::ScopedAStatus::ok();
+}
+
+std::vector<bool> SampleDriverFloatFast::getSupportedOperationsImpl(const Model& model) const {
+    // An operation is supported iff it is non-extension and its first input is a
+    // float32 tensor; all other entries keep the default value (false).
+    const auto& operations = model.main.operations;
+    std::vector<bool> supported(operations.size());
+    for (size_t opIndex = 0; opIndex < operations.size(); ++opIndex) {
+        const Operation& op = operations[opIndex];
+        if (isExtensionOperationType(op.type) || op.inputs.empty()) {
+            continue;
+        }
+        const Operand& input0 = model.main.operands[op.inputs[0]];
+        supported[opIndex] = (input0.type == OperandType::TENSOR_FLOAT32);
+    }
+    return supported;
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+using android::nn::sample_driver::SampleDriverFloatFast;
+
+int main() {
+    // Instantiate and serve the fast-float sample driver until shutdown.
+    const auto service = ndk::SharedRefBase::make<SampleDriverFloatFast>();
+    return service->run();
+}
diff --git a/driver/sample_aidl/SampleDriverFloatSlow.cpp b/driver/sample_aidl/SampleDriverFloatSlow.cpp
new file mode 100644
index 0000000..f149608
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverFloatSlow.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverFloatSlow"
+
+#include <android-base/logging.h>
+#include <hidl/LegacySupport.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include "AidlHalUtils.h"
+#include "SampleDriverPartial.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Partial sample driver that only claims non-extension float32 operations and
+// advertises slow-but-power-efficient float performance (see getCapabilities).
+class SampleDriverFloatSlow : public SampleDriverPartial {
+   public:
+    SampleDriverFloatSlow() : SampleDriverPartial("nnapi-sample_float_slow") {}
+    ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override;
+
+   private:
+    // Supports an op iff it is non-extension and its first input is TENSOR_FLOAT32.
+    std::vector<bool> getSupportedOperationsImpl(const Model& model) const override;
+};
+
+ndk::ScopedAStatus SampleDriverFloatSlow::getCapabilities(aidl_hal::Capabilities* capabilities) {
+    android::nn::initVLogMask();
+    VLOG(DRIVER) << "getCapabilities()";
+
+    // Slow-float profile: relaxed and float32 entries trade longer exec time
+    // for lower power than the 1.0/1.0 baseline used for everything else.
+    const aidl_hal::PerformanceInfo kRelaxedPerf = {.execTime = 1.2f, .powerUsage = 0.6f};
+    const aidl_hal::PerformanceInfo kFloatPerf = {.execTime = 1.3f, .powerUsage = 0.7f};
+    const aidl_hal::PerformanceInfo kBasePerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+    *capabilities = {.relaxedFloat32toFloat16PerformanceScalar = kRelaxedPerf,
+                     .relaxedFloat32toFloat16PerformanceTensor = kRelaxedPerf,
+                     .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f}),
+                     .ifPerformance = kBasePerf,
+                     .whilePerformance = kBasePerf};
+    update(&capabilities->operandPerformance, aidl_hal::OperandType::TENSOR_FLOAT32, kFloatPerf);
+    update(&capabilities->operandPerformance, aidl_hal::OperandType::FLOAT32, kFloatPerf);
+
+    return ndk::ScopedAStatus::ok();
+}
+
+std::vector<bool> SampleDriverFloatSlow::getSupportedOperationsImpl(const Model& model) const {
+    // An operation is supported iff it is non-extension and its first input is a
+    // float32 tensor; all other entries keep the default value (false).
+    const auto& operations = model.main.operations;
+    std::vector<bool> supported(operations.size());
+    for (size_t opIndex = 0; opIndex < operations.size(); ++opIndex) {
+        const Operation& op = operations[opIndex];
+        if (isExtensionOperationType(op.type) || op.inputs.empty()) {
+            continue;
+        }
+        const Operand& input0 = model.main.operands[op.inputs[0]];
+        supported[opIndex] = (input0.type == OperandType::TENSOR_FLOAT32);
+    }
+    return supported;
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+using android::nn::sample_driver::SampleDriverFloatSlow;
+
+int main() {
+    // Instantiate and serve the slow-float sample driver until shutdown.
+    const auto service = ndk::SharedRefBase::make<SampleDriverFloatSlow>();
+    return service->run();
+}
diff --git a/driver/sample_aidl/SampleDriverFull.cpp b/driver/sample_aidl/SampleDriverFull.cpp
new file mode 100644
index 0000000..f984815
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverFull.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverFull"
+
+#include "SampleDriverFull.h"
+
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <string>
+#include <vector>
+
+#include "AidlHalUtils.h"
+#include "LegacyUtils.h"
+#include "SampleDriverUtils.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+ndk::ScopedAStatus SampleDriverFull::getCapabilities(aidl_hal::Capabilities* capabilities) {
+    android::nn::initVLogMask();
+    VLOG(DRIVER) << "getCapabilities()";
+    // Report the single performance figure this driver was constructed with for
+    // every capability category and every non-extension operand type.
+    const aidl_hal::Capabilities caps = {.relaxedFloat32toFloat16PerformanceScalar = mPerf,
+                                         .relaxedFloat32toFloat16PerformanceTensor = mPerf,
+                                         .operandPerformance = nonExtensionOperandPerformance(mPerf),
+                                         .ifPerformance = mPerf,
+                                         .whilePerformance = mPerf};
+    *capabilities = caps;
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus SampleDriverFull::getSupportedOperations(
+        const aidl_hal::Model& model, std::vector<bool>* supportedOperations) {
+    VLOG(DRIVER) << "getSupportedOperations()";
+    // Validate/convert the HAL model first; reject it outright if that fails.
+    const auto canonicalModel = convert(model);
+    if (!canonicalModel.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalModel.error().message);
+    }
+    // The full driver supports every operation except extension operations.
+    const auto& operations = canonicalModel.value().main.operations;
+    supportedOperations->clear();
+    supportedOperations->reserve(operations.size());
+    for (const Operation& operation : operations) {
+        supportedOperations->push_back(!isExtensionOperationType(operation.type));
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
diff --git a/driver/sample_aidl/SampleDriverFull.h b/driver/sample_aidl/SampleDriverFull.h
new file mode 100644
index 0000000..144af37
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverFull.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_FULL_H
+#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_FULL_H
+
+#include <vector>
+
+#include "SampleDriver.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Sample driver that supports every non-extension operation and reports the
+// single performance figure `perf` for all capability categories.
+class SampleDriverFull : public SampleDriver {
+   public:
+    SampleDriverFull(const char* name, aidl_hal::PerformanceInfo perf)
+        : SampleDriver(name), mPerf(perf) {}
+    ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override;
+    ndk::ScopedAStatus getSupportedOperations(const aidl_hal::Model& model,
+                                              std::vector<bool>* supportedOperations) override;
+
+   private:
+    // Uniform performance figure reported by getCapabilities().
+    aidl_hal::PerformanceInfo mPerf;
+};
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_FULL_H
diff --git a/driver/sample_aidl/SampleDriverMinimal.cpp b/driver/sample_aidl/SampleDriverMinimal.cpp
new file mode 100644
index 0000000..d17e780
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverMinimal.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverMinimal"
+
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include "AidlHalUtils.h"
+#include "SampleDriverPartial.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Partial sample driver that simulates supporting only a handful of float32
+// operations (see getSupportedOperationsImpl).
+class SampleDriverMinimal : public SampleDriverPartial {
+   public:
+    SampleDriverMinimal() : SampleDriverPartial("nnapi-sample_minimal") {}
+    ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override;
+
+   private:
+    // Supports only ADD/CONCATENATION/CONV_2D with a TENSOR_FLOAT32 first input.
+    std::vector<bool> getSupportedOperationsImpl(const Model& model) const override;
+};
+
+ndk::ScopedAStatus SampleDriverMinimal::getCapabilities(aidl_hal::Capabilities* capabilities) {
+    android::nn::initVLogMask();
+    VLOG(DRIVER) << "getCapabilities()";
+
+    // Float32 work (relaxed and full precision) is advertised as cheap
+    // (0.4/0.5); everything else stays at the 1.0/1.0 baseline.
+    const aidl_hal::PerformanceInfo kFloatPerf = {.execTime = 0.4f, .powerUsage = 0.5f};
+    const aidl_hal::PerformanceInfo kBasePerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+    *capabilities = {.relaxedFloat32toFloat16PerformanceScalar = kFloatPerf,
+                     .relaxedFloat32toFloat16PerformanceTensor = kFloatPerf,
+                     .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f}),
+                     .ifPerformance = kBasePerf,
+                     .whilePerformance = kBasePerf};
+    update(&capabilities->operandPerformance, aidl_hal::OperandType::TENSOR_FLOAT32, kFloatPerf);
+    update(&capabilities->operandPerformance, aidl_hal::OperandType::FLOAT32, kFloatPerf);
+
+    return ndk::ScopedAStatus::ok();
+}
+
+std::vector<bool> SampleDriverMinimal::getSupportedOperationsImpl(const Model& model) const {
+    // Simulate supporting just a few ops: an entry is true iff the operation is
+    // ADD/CONCATENATION/CONV_2D and its first input is a float32 tensor.
+    const size_t count = model.main.operations.size();
+    std::vector<bool> supported(count);
+    for (size_t i = 0; i < count; i++) {
+        supported[i] = false;
+        const Operation& operation = model.main.operations[i];
+        switch (operation.type) {
+            case OperationType::ADD:
+            case OperationType::CONCATENATION:
+            case OperationType::CONV_2D: {
+                // Guard against a malformed model with no inputs before indexing,
+                // matching the defensive checks in the other partial sample drivers.
+                if (operation.inputs.empty()) {
+                    break;
+                }
+                const Operand& firstOperand = model.main.operands[operation.inputs[0]];
+                if (firstOperand.type == OperandType::TENSOR_FLOAT32) {
+                    supported[i] = true;
+                }
+                break;
+            }
+            default:
+                break;
+        }
+    }
+    return supported;
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+using android::nn::sample_driver::SampleDriverMinimal;
+
+int main() {
+    // Instantiate and serve the minimal sample driver until shutdown.
+    const auto service = ndk::SharedRefBase::make<SampleDriverMinimal>();
+    return service->run();
+}
diff --git a/driver/sample_aidl/SampleDriverPartial.cpp b/driver/sample_aidl/SampleDriverPartial.cpp
new file mode 100644
index 0000000..45bf8de
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverPartial.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverPartial"
+
+#include "SampleDriverPartial.h"
+
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+
+#include <algorithm>
+#include <memory>
+#include <thread>
+#include <utility>
+#include <vector>
+
+#include "SampleDriverUtils.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+ndk::ScopedAStatus SampleDriverPartial::getSupportedOperations(
+        const aidl_hal::Model& model, std::vector<bool>* supportedOperations) {
+    VLOG(DRIVER) << "getSupportedOperations()";
+    // Convert to the canonical model, then let the subclass filter decide
+    // which operations it claims.
+    const auto canonicalModel = convert(model);
+    if (canonicalModel.has_value()) {
+        *supportedOperations = getSupportedOperationsImpl(canonicalModel.value());
+        return ndk::ScopedAStatus::ok();
+    }
+    return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalModel.error().message);
+}
+
+ndk::ScopedAStatus SampleDriverPartial::prepareModel(
+        const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference,
+        aidl_hal::Priority priority, int64_t deadline,
+        const std::vector<ndk::ScopedFileDescriptor>&,
+        const std::vector<ndk::ScopedFileDescriptor>&, const std::vector<uint8_t>&,
+        const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) {
+    // Models that fail canonical conversion are rejected through both the
+    // callback and the returned binder status.
+    const auto canonicalModel = convert(model);
+    if (!canonicalModel.has_value()) {
+        notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+        return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalModel.error().message);
+    }
+    // Tell prepareModelBase whether every operation passes the subclass filter.
+    const std::vector<bool> supportedOps = getSupportedOperationsImpl(canonicalModel.value());
+    const bool fullySupported =
+            std::all_of(supportedOps.begin(), supportedOps.end(), [](bool ok) { return ok; });
+    auto modelCopy = aidl_hal::utils::clone(model);
+    if (!modelCopy.has_value()) {
+        return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, modelCopy.error().message);
+    }
+    return prepareModelBase(std::move(modelCopy).value(), this, preference, priority, deadline,
+                            callback, fullySupported);
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
diff --git a/driver/sample_aidl/SampleDriverPartial.h b/driver/sample_aidl/SampleDriverPartial.h
new file mode 100644
index 0000000..33de485
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverPartial.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_PARTIAL_H
+#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_PARTIAL_H
+
+#include <android-base/logging.h>
+
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include "SampleDriver.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// A base class for sample drivers that support only a subset of NNAPI
+// operations. Classes of such drivers should inherit from this class and
+// implement getSupportedOperationsImpl function which is used for filtering out
+// unsupported ops.
+class SampleDriverPartial : public SampleDriver {
+   public:
+    SampleDriverPartial(const char* name, const IOperationResolver* operationResolver =
+                                                  BuiltinOperationResolver::get())
+        : SampleDriver(name, operationResolver) {}
+    // Converts the HAL model to canonical form and defers the per-operation
+    // decision to getSupportedOperationsImpl().
+    ndk::ScopedAStatus getSupportedOperations(const aidl_hal::Model& model,
+                                              std::vector<bool>* supportedOperations) override;
+    // Prepares the model; conversion failures are reported through the callback
+    // and the returned status.
+    ndk::ScopedAStatus prepareModel(
+            const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference,
+            aidl_hal::Priority priority, int64_t deadline,
+            const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+            const std::vector<uint8_t>& token,
+            const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) override;
+
+   protected:
+    // Given a valid NNAPI Model returns a boolean vector that indicates which
+    // ops in the model are supported by a driver.
+    virtual std::vector<bool> getSupportedOperationsImpl(const Model& model) const = 0;
+};
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_PARTIAL_H
diff --git a/driver/sample_aidl/SampleDriverQuant.cpp b/driver/sample_aidl/SampleDriverQuant.cpp
new file mode 100644
index 0000000..10350ed
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverQuant.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleDriverQuant"
+
+#include <android-base/logging.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include "AidlHalUtils.h"
+#include "SampleDriverPartial.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Partial sample driver that only claims operations on 8-bit asymmetric
+// quantized data (see getSupportedOperationsImpl).
+class SampleDriverQuant : public SampleDriverPartial {
+   public:
+    SampleDriverQuant() : SampleDriverPartial("nnapi-sample_quant") {}
+    ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override;
+
+   private:
+    // Supports an op iff it is non-extension and its relevant input is quantized.
+    std::vector<bool> getSupportedOperationsImpl(const Model& model) const override;
+};
+
+ndk::ScopedAStatus SampleDriverQuant::getCapabilities(aidl_hal::Capabilities* capabilities) {
+    android::nn::initVLogMask();
+    VLOG(DRIVER) << "getCapabilities()";
+
+    // Uniform 50x exec-time penalty across all categories and operand types.
+    const aidl_hal::PerformanceInfo kSlowPerf = {.execTime = 50.0f, .powerUsage = 1.0f};
+    *capabilities = {.relaxedFloat32toFloat16PerformanceScalar = kSlowPerf,
+                     .relaxedFloat32toFloat16PerformanceTensor = kSlowPerf,
+                     .operandPerformance = nonExtensionOperandPerformance({50.0f, 1.0f}),
+                     .ifPerformance = kSlowPerf,
+                     .whilePerformance = kSlowPerf};
+
+    return ndk::ScopedAStatus::ok();
+}
+
+// Returns true for the 8-bit asymmetric quantized tensor types this driver targets.
+static bool isQuantized(OperandType opType) {
+    switch (opType) {
+        case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+            return true;
+        default:
+            return false;
+    }
+}
+
+std::vector<bool> SampleDriverQuant::getSupportedOperationsImpl(const Model& model) const {
+    // An operation is supported iff it is non-extension and operates on
+    // quantized data. The first input decides, except for SELECT where input 0
+    // is the boolean condition, so the second input (first data operand) decides.
+    const size_t count = model.main.operations.size();
+    std::vector<bool> supported(count);
+    for (size_t i = 0; i < count; i++) {
+        const Operation& operation = model.main.operations[i];
+        if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) {
+            const Operand& firstOperand = model.main.operands[operation.inputs[0]];
+            supported[i] = isQuantized(firstOperand.type);
+            if (operation.type == OperationType::SELECT) {
+                // Guard against a malformed model before reading the second
+                // input (a valid SELECT always has at least two inputs).
+                if (operation.inputs.size() > 1) {
+                    const Operand& secondOperand = model.main.operands[operation.inputs[1]];
+                    supported[i] = isQuantized(secondOperand.type);
+                } else {
+                    supported[i] = false;
+                }
+            }
+        }
+    }
+    return supported;
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+using android::nn::sample_driver::SampleDriverQuant;
+
+int main() {
+    // Instantiate and serve the quantized sample driver until shutdown.
+    const auto service = ndk::SharedRefBase::make<SampleDriverQuant>();
+    return service->run();
+}
diff --git a/driver/sample_aidl/SampleDriverUtils.cpp b/driver/sample_aidl/SampleDriverUtils.cpp
new file mode 100644
index 0000000..d7d4364
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverUtils.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SampleDriverUtils.h"
+
+#include <aidl/android/hardware/common/NativeHandle.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_ibinder.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+#include <utils/NativeHandle.h>
+
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "SampleDriver.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Delivers a prepareModel result to the client via
+// IPreparedModelCallback::notify. A failed binder transaction is logged and
+// otherwise ignored: there is nothing further the driver can do if the client
+// side is gone.
+void notify(const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback,
+ const aidl_hal::ErrorStatus& status,
+ const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) {
+ const auto ret = callback->notify(status, preparedModel);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "Error when calling IPreparedModelCallback::notify: " << ret.getDescription()
+ << " " << ret.getMessage();
+ }
+}
+
+// Converts an AIDL ErrorStatus to a binder status: NONE maps to OK, any other
+// value becomes a service-specific error carrying the status code.
+ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus) {
+ if (errorStatus == aidl_hal::ErrorStatus::NONE) {
+ return ndk::ScopedAStatus::ok();
+ }
+ return ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(errorStatus));
+}
+
+// Same as toAStatus(ErrorStatus) but attaches a human-readable message to the
+// service-specific error. NONE still maps to OK (the message is dropped).
+ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus, const std::string& errorMessage) {
+ if (errorStatus == aidl_hal::ErrorStatus::NONE) {
+ return ndk::ScopedAStatus::ok();
+ }
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(errorStatus), errorMessage.c_str());
+}
+
+// Shared implementation of IDevice::prepareModel* for the sample drivers.
+// Validates the callback, model, execution preference, priority, and deadline,
+// then asynchronously builds a SamplePreparedModel on a detached thread and
+// delivers it through `callback`. Argument-validation failures are reported
+// both via the callback and the returned binder status; failures detected
+// after validation (missed deadline, unsupported model, initialization
+// failure) are reported via the callback only, with the binder call itself
+// returning OK.
+ndk::ScopedAStatus prepareModelBase(
+ aidl_hal::Model&& model, const SampleDriver* driver,
+ aidl_hal::ExecutionPreference preference, aidl_hal::Priority priority, int64_t halDeadline,
+ const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback,
+ bool isFullModelSupported) {
+ // Identity of the calling process; forwarded into the SamplePreparedModel.
+ const uid_t userId = AIBinder_getCallingUid();
+ if (callback.get() == nullptr) {
+ LOG(ERROR) << "invalid callback passed to prepareModelBase";
+ return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+ "invalid callback passed to prepareModelBase");
+ }
+ // Converting to the canonical model doubles as validation of the AIDL model.
+ const auto canonicalModel = convert(model);
+ if (!canonicalModel.has_value()) {
+ VLOG(DRIVER) << "invalid model passed to prepareModelBase";
+ notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+ "invalid model passed to prepareModelBase");
+ }
+ if (VLOG_IS_ON(DRIVER)) {
+ VLOG(DRIVER) << "prepareModelBase";
+ logModelToInfo(canonicalModel.value());
+ }
+ if (!aidl_hal::utils::valid(preference)) {
+ const std::string log_message =
+ "invalid execution preference passed to prepareModelBase: " + toString(preference);
+ VLOG(DRIVER) << log_message;
+ notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, log_message);
+ }
+ if (!aidl_hal::utils::valid(priority)) {
+ const std::string log_message =
+ "invalid priority passed to prepareModelBase: " + toString(priority);
+ VLOG(DRIVER) << log_message;
+ notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, log_message);
+ }
+
+ // Support is computed by the caller (each concrete driver knows its own
+ // supported-operations policy); this helper only reports the failure.
+ if (!isFullModelSupported) {
+ VLOG(DRIVER) << "model is not fully supported";
+ notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return ndk::ScopedAStatus::ok();
+ }
+
+ // NOTE(review): only values below -1 are rejected, so -1 presumably means
+ // "no deadline" per the AIDL HAL contract -- confirm against the spec.
+ if (halDeadline < -1) {
+ notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT,
+ "Invalid deadline: " + toString(halDeadline));
+ }
+ const auto deadline = makeDeadline(halDeadline);
+ if (hasDeadlinePassed(deadline)) {
+ notify(callback, aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT, nullptr);
+ return ndk::ScopedAStatus::ok();
+ }
+
+ // asynchronously prepare the model from a new, detached thread
+ std::thread(
+ [driver, preference, userId, priority, callback](aidl_hal::Model&& model) {
+ std::shared_ptr<SamplePreparedModel> preparedModel =
+ ndk::SharedRefBase::make<SamplePreparedModel>(std::move(model), driver,
+ preference, userId, priority);
+ if (!preparedModel->initialize()) {
+ notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return;
+ }
+ notify(callback, aidl_hal::ErrorStatus::NONE, preparedModel);
+ },
+ std::move(model))
+ .detach();
+
+ return ndk::ScopedAStatus::ok();
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
diff --git a/driver/sample_aidl/SampleDriverUtils.h b/driver/sample_aidl/SampleDriverUtils.h
new file mode 100644
index 0000000..7205318
--- /dev/null
+++ b/driver/sample_aidl/SampleDriverUtils.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_UTILS_H
+
+#include <android/binder_auto_utils.h>
+
+#include <memory>
+#include <string>
+
+#include "SampleDriver.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+// Delivers a prepareModel result through the client callback, logging any
+// binder transport error.
+void notify(const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback,
+ const aidl_hal::ErrorStatus& status,
+ const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel);
+
+// Validates the prepareModel arguments and asynchronously builds a
+// SamplePreparedModel, reporting the result through `callback`. Callers that
+// have already computed operation support pass isFullModelSupported = false to
+// have the failure reported for them; it defaults to true.
+ndk::ScopedAStatus prepareModelBase(
+ aidl_hal::Model&& model, const SampleDriver* driver,
+ aidl_hal::ExecutionPreference preference, aidl_hal::Priority priority, int64_t halDeadline,
+ const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback,
+ bool isFullModelSupported = true);
+
+// Converts an AIDL ErrorStatus to a binder status (NONE -> OK, otherwise a
+// service-specific error, optionally carrying a message).
+ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus);
+ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus, const std::string& errorMessage);
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_UTILS_H
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.rc
new file mode 100644
index 0000000..4923289
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.rc
@@ -0,0 +1,4 @@
+service neuralnetworks_hal_service_aidl_sample_all /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-all
+ class hal
+ user system
+ group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.xml
new file mode 100644
index 0000000..fea5fcc
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.xml
@@ -0,0 +1,6 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.neuralnetworks</name>
+ <fqname>IDevice/nnapi-sample_all</fqname>
+ </hal>
+</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.rc
new file mode 100644
index 0000000..de6b807
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.rc
@@ -0,0 +1,4 @@
+service neuralnetworks_hal_service_aidl_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-float-fast
+ class hal
+ user system
+ group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.xml
new file mode 100644
index 0000000..a245114
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.xml
@@ -0,0 +1,6 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.neuralnetworks</name>
+ <fqname>IDevice/nnapi-sample_float_fast</fqname>
+ </hal>
+</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.rc
new file mode 100644
index 0000000..e99171a
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.rc
@@ -0,0 +1,4 @@
+service neuralnetworks_hal_service_aidl_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-float-slow
+ class hal
+ user system
+ group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.xml
new file mode 100644
index 0000000..e1126a4
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.xml
@@ -0,0 +1,6 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.neuralnetworks</name>
+ <fqname>IDevice/nnapi-sample_float_slow</fqname>
+ </hal>
+</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.rc
new file mode 100644
index 0000000..58ad570
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.rc
@@ -0,0 +1,4 @@
+service neuralnetworks_hal_service_aidl_sample_minimal /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-minimal
+ class hal
+ user system
+ group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.xml
new file mode 100644
index 0000000..dcd2b1d
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.xml
@@ -0,0 +1,6 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.neuralnetworks</name>
+ <fqname>IDevice/nnapi-sample_minimal</fqname>
+ </hal>
+</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.rc
new file mode 100644
index 0000000..3151db5
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.rc
@@ -0,0 +1,4 @@
+service neuralnetworks_hal_service_aidl_sample_quant /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-quant
+ class hal
+ user system
+ group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.xml
new file mode 100644
index 0000000..30dc2ee
--- /dev/null
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.xml
@@ -0,0 +1,6 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.neuralnetworks</name>
+ <fqname>IDevice/nnapi-sample_quant</fqname>
+ </hal>
+</manifest>
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 712b122..5a6ad5f 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -79,6 +79,7 @@
// TODO(pszczepaniak, b/144488395): Use system libnativewindow,
// this would remove half of dependencies here.
static_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
"[email protected]",
"[email protected]",
"[email protected]",
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp
index a0ffb2b..720b43a 100644
--- a/runtime/test/Android.bp
+++ b/runtime/test/Android.bp
@@ -35,6 +35,7 @@
"libutils",
],
static_libs: [
+ "android.hardware.neuralnetworks-V1-ndk_platform",
"[email protected]",
"[email protected]",
"[email protected]",