Add versioned interface for 1.2 HAL changes.

Add versioned interface for IPreparedModel. Support 1.2 callbacks.
Implement 1.2 HAL interface in sample driver.

Bug: 73506513
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_xTargetTest with 1.2 sample driver
Change-Id: I1bc7aed424ebf3fd9635b1e411ee41a853d5bc9b
Merged-In: I1bc7aed424ebf3fd9635b1e411ee41a853d5bc9b
(cherry picked from commit cce4cb4dfae7a48c3f9f53e5eff6a3e246ed54c5)
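At the call sites, the runtime stops holding raw sp<V1_0::IPreparedModel> handles and instead wraps whatever the driver's callback returns in the new VersionedIPreparedModel, which prefers the 1.2 entry points when the driver implements them. A minimal sketch of that pattern, assuming a free helper function (StepExecutor and ExecutionPlan in the diff below do the same thing with full error handling):

    #include <memory>
    #include "Callbacks.h"
    #include "VersionedInterfaces.h"

    using ::android::sp;
    using ::android::nn::VersionedIPreparedModel;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using PreparedModelCallback =
            ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;

    std::shared_ptr<VersionedIPreparedModel> wrapPreparedModel(
            const sp<PreparedModelCallback>& callback) {
        callback->wait();  // returns once the driver has called notify() or notify_1_2()
        if (callback->getStatus() != ErrorStatus::NONE) {
            return nullptr;
        }
        if (auto preparedModel = callback->getPreparedModel()) {
            // The wrapper casts to V1_2::IPreparedModel internally and dispatches to
            // execute_1_2 when that cast succeeds.
            return std::make_shared<VersionedIPreparedModel>(preparedModel);
        }
        return nullptr;
    }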
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 50aed19..8af49ae 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -43,7 +43,7 @@
"Memory.cpp",
"ModelBuilder.cpp",
"NeuralNetworks.cpp",
- "VersionedIDevice.cpp",
+ "VersionedInterfaces.cpp",
],
target: {
diff --git a/runtime/Callbacks.cpp b/runtime/Callbacks.cpp
index 3b838d8..b08563d 100644
--- a/runtime/Callbacks.cpp
+++ b/runtime/Callbacks.cpp
@@ -20,7 +20,7 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
namespace implementation {
CallbackBase::CallbackBase() : mNotified(false) {}
@@ -104,7 +104,15 @@
PreparedModelCallback::~PreparedModelCallback() {}
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
- const sp<IPreparedModel>& preparedModel) {
+ const sp<V1_0::IPreparedModel>& preparedModel) {
+ mErrorStatus = errorStatus;
+ mPreparedModel = preparedModel;
+ CallbackBase::notify();
+ return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+ const sp<V1_2::IPreparedModel>& preparedModel) {
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
CallbackBase::notify();
@@ -116,7 +124,7 @@
return mErrorStatus;
}
-sp<IPreparedModel> PreparedModelCallback::getPreparedModel() {
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
wait();
return mPreparedModel;
}
@@ -131,13 +139,19 @@
return Void();
}
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) {
+ mErrorStatus = errorStatus;
+ CallbackBase::notify();
+ return Void();
+}
+
ErrorStatus ExecutionCallback::getStatus() {
wait();
return mErrorStatus;
}
} // namespace implementation
-} // namespace V1_0
+} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
diff --git a/runtime/Callbacks.h b/runtime/Callbacks.h
index fb9b29d..5acedda 100644
--- a/runtime/Callbacks.h
+++ b/runtime/Callbacks.h
@@ -19,27 +19,30 @@
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
#include <functional>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
#include <mutex>
#include <thread>
namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
namespace implementation {
+using ::android::sp;
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ::android::sp;
+using V1_0::ErrorStatus;
/**
* The CallbackBase class is used internally by the NeuralNetworks runtime to
@@ -180,11 +183,11 @@
* asynchronously with respect to the runtime. If a calling thread calls wait*
* or get* on a PreparedModelCallback object and the corresponding asynchronous
* task has not finished preparing the model, the calling thread will block
- * until the asynchronous task has called notify. For more information on the
- * synchronization behavior, refer to the CallbackBase class.
+ * until the asynchronous task has either called notify or notify_1_2. For more
+ * information on the synchronization behavior, refer to the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
* IPreparedModelCallback. This callback object is passed as an argument to
* IDevice::prepareModel.
*/
@@ -194,15 +197,15 @@
~PreparedModelCallback() override;
/**
- * IPreparedModelCallback::notify marks the callback object with the return
- * status of the asynchronous model preparation along with the prepared
- * model, and calls CallbackBase::notify, enabling all prior and future
- * wait* calls on the PreparedModelCallback object to proceed. For more
- * information on the synchronization behavior, refer to the CallbackBase
- * class.
+ * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
+ * mark the callback object with the return status of the asynchronous
+ * model preparation along with the prepared model, and call
+ * CallbackBase::notify, enabling all prior and future wait* calls on the
+ * PreparedModelCallback object to proceed. For more information on the
+ * synchronization behavior, refer to the CallbackBase class.
*
- * IPreparedModelCallback::notify must be called exactly once on a given
- * PreparedModelCallback object.
+ * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
+ * must be called exactly once on a given PreparedModelCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
@@ -213,7 +216,9 @@
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+ Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+ Return<void> notify_1_2(ErrorStatus status,
+ const sp<V1_2::IPreparedModel>& preparedModel) override;
/**
* Retrieves the error status returned from the asynchronous task launched
@@ -241,11 +246,11 @@
* execution, nullptr if the model was unable to be
* prepared.
*/
- sp<IPreparedModel> getPreparedModel();
+ sp<V1_0::IPreparedModel> getPreparedModel();
- private:
+ private:
ErrorStatus mErrorStatus;
- sp<IPreparedModel> mPreparedModel;
+ sp<V1_0::IPreparedModel> mPreparedModel;
};
/**
@@ -253,12 +258,12 @@
* execution from a task executing asynchronously with respect to the runtime.
* If a calling thread calls wait* or get* on a PreparedModelCallback object and
* the corresponding asynchronous task has not finished the execution, the
- * calling thread will block until the asynchronous task has called notify. For
- * more information on the synchronization behavior, refer to the CallbackBase
- * class.
+ * calling thread will block until the asynchronous task has either called notify
+ * or notify_1_2. For more information on the synchronization behavior, refer to
+ * the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
* IExecutionCallback. This callback object is passed as an argument to
* IPreparedModel::execute.
*/
@@ -268,14 +273,14 @@
~ExecutionCallback() override;
/**
- * IExecutionCallback::notify marks the callback object with the return
- * status of the asynchronous execution that held this callback and enables
- * all prior and future wait* calls on the ExecutionCallback object to
- * proceed. For more information on the synchronization behavior, refer to
- * the CallbackBase class.
+ * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
+ * callback object with the return status of the asynchronous execution that
+ * held this callback and enable all prior and future wait* calls on the
+ * ExecutionCallback object to proceed. For more information on the
+ * synchronization behavior, refer to the CallbackBase class.
*
- * IExecutionCallback::notify must be called exactly once on a given
- * ExecutionCallback object.
+ * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+ * be called exactly once on a given ExecutionCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
@@ -287,6 +292,7 @@
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(ErrorStatus status) override;
+ Return<void> notify_1_2(ErrorStatus status) override;
/**
* Retrieves the error status returned from the asynchronous task launched
@@ -323,7 +329,7 @@
}
} // namespace implementation
-} // namespace V1_0
+} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
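The callback classes above now implement both the 1.0 and 1.2 callback interfaces, so a 1.2 driver reports completion through notify_1_2 while older drivers keep calling notify; either one releases every thread blocked in the wait*/get* calls. A small driver-side sketch, assuming a standalone helper rather than the sample driver's real asynchronous task (TestDriver12 later in this diff follows the same shape):

    #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
    #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>

    using ::android::sp;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
    using ::android::hardware::neuralnetworks::V1_2::IPreparedModelCallback;

    void notifyPreparationDone(const sp<IPreparedModelCallback>& callback, ErrorStatus status,
                               const sp<IPreparedModel>& preparedModel) {
        if (status != ErrorStatus::NONE) {
            callback->notify_1_2(status, nullptr);  // no prepared model on failure
        } else {
            callback->notify_1_2(ErrorStatus::NONE, preparedModel);
        }
    }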
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index bf758ad..f6226d5 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -455,12 +455,15 @@
}
}
-StepExecutor::StepExecutor(const ExecutionBuilder* executionBuilder,
- const ModelBuilder* model,
- VersionedIDevice* driver, sp<IPreparedModel> preparedModel) :
- mExecutionBuilder(executionBuilder), mModel(model),
- mDriver(driver), mPreparedModel(preparedModel),
- mInputs(model->inputCount()), mOutputs(model->outputCount()) {}
+StepExecutor::StepExecutor(const ExecutionBuilder* executionBuilder, const ModelBuilder* model,
+ VersionedIDevice* driver,
+ std::shared_ptr<VersionedIPreparedModel> preparedModel)
+ : mExecutionBuilder(executionBuilder),
+ mModel(model),
+ mDriver(driver),
+ mPreparedModel(preparedModel),
+ mInputs(model->inputCount()),
+ mOutputs(model->outputCount()) {}
void StepExecutor::mapInputsAndOutputsTrivially() {
mInputs = mExecutionBuilder->mInputs;
@@ -570,7 +573,9 @@
// TODO: change to asynchronous later
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- mPreparedModel = preparedModelCallback->getPreparedModel();
+ if (auto preparedModel = preparedModelCallback->getPreparedModel()) {
+ mPreparedModel = std::make_shared<VersionedIPreparedModel>(preparedModel);
+ }
if (prepareReturnStatus != ErrorStatus::NONE) {
return convertErrorStatusToResultCode(prepareReturnStatus);
}
@@ -693,7 +698,7 @@
NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "computeOnCpu");
CpuExecutor executor;
int err = executor.run(model, request, modelPoolInfos, requestPoolInfos);
- executionCallback->notify(convertResultCodeToErrorStatus(err));
+ executionCallback->notify_1_2(convertResultCodeToErrorStatus(err));
}
int StepExecutor::startComputeOnCpu(sp<ExecutionCallback>* synchronizationCallback) {
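With the prepared model held as a VersionedIPreparedModel, launching an execution and waiting for its result follows the pattern below. This is a sketch with an assumed helper name, not the exact control flow of startComputeOnDevice, which hands the callback back to its caller instead of waiting inline:

    #include "Callbacks.h"
    #include "VersionedInterfaces.h"

    using ::android::sp;
    using ::android::nn::VersionedIPreparedModel;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_0::Request;
    using ExecutionCallback =
            ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;

    ErrorStatus executeAndWait(VersionedIPreparedModel* preparedModel, const Request& request) {
        sp<ExecutionCallback> callback = new ExecutionCallback();
        ErrorStatus launchStatus = preparedModel->execute(request, callback);
        if (launchStatus != ErrorStatus::NONE) {
            return launchStatus;  // the asynchronous task was never launched
        }
        callback->wait();              // released by notify() or notify_1_2()
        return callback->getStatus();  // the actual execution status
    }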
diff --git a/runtime/ExecutionBuilder.h b/runtime/ExecutionBuilder.h
index 045d032..ba24f03 100644
--- a/runtime/ExecutionBuilder.h
+++ b/runtime/ExecutionBuilder.h
@@ -22,12 +22,13 @@
#include "Memory.h"
#include "ModelBuilder.h"
#include "NeuralNetworks.h"
+#include "VersionedInterfaces.h"
#include <unordered_map>
#include <vector>
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
namespace android {
namespace nn {
@@ -117,7 +118,7 @@
// with that step is executed in its entirety on a single device (or
// on the CPU).
class StepExecutor {
-public:
+ public:
// executionBuilder
// Describes the full (possibly multiple-"step") execution.
// model
@@ -127,9 +128,8 @@
// The device on which to execute the "step", and the prepared
// model to execute on that device. (Both are nullptr in the
// case of CPU.)
- StepExecutor(const ExecutionBuilder* executionBuilder,
- const ModelBuilder* model,
- VersionedIDevice* driver, sp<IPreparedModel> preparedModel);
+ StepExecutor(const ExecutionBuilder* executionBuilder, const ModelBuilder* model,
+ VersionedIDevice* driver, std::shared_ptr<VersionedIPreparedModel> preparedModel);
// Map inputs and outputs from ExecutionBuilder to StepExecutor,
// in the case where we have a single-"step" execution (i.e., the executor
@@ -140,12 +140,10 @@
// one at a time. Note that these are input/output indexes, not
// operand indexes.
void mapInput(uint32_t builderIndex, uint32_t executorIndex) {
- mapInputOrOutput(mExecutionBuilder->mInputs[builderIndex],
- &mInputs[executorIndex]);
+ mapInputOrOutput(mExecutionBuilder->mInputs[builderIndex], &mInputs[executorIndex]);
}
void mapOutput(uint32_t builderIndex, uint32_t executorIndex) {
- mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex],
- &mOutputs[executorIndex]);
+ mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mOutputs[executorIndex]);
}
void mapOutputToInput(uint32_t builderIndex, uint32_t executorIndex) {
mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex],
@@ -174,7 +172,7 @@
bool isCpu() const { return mDriver == nullptr; }
-private:
+ private:
int allocatePointerArgumentsToPool(std::vector<ModelArgumentInfo>* args, Memory* memory);
int startComputeOnDevice(sp<ExecutionCallback>* synchronizationCallback);
@@ -192,7 +190,8 @@
// compiled forms; and device on which to execute it
const ModelBuilder* mModel;
VersionedIDevice* mDriver; // nullptr if CPU execution
- sp<IPreparedModel> mPreparedModel; // nullptr if CPU execution or if bypassing ExecutionPlan
+ std::shared_ptr<VersionedIPreparedModel>
+ mPreparedModel; // nullptr if CPU execution or if bypassing ExecutionPlan
// The information we'll send to the driver about the inputs and outputs.
// Note that we build this in two steps:
diff --git a/runtime/ExecutionPlan.cpp b/runtime/ExecutionPlan.cpp
index ba5d485..1452229 100644
--- a/runtime/ExecutionPlan.cpp
+++ b/runtime/ExecutionPlan.cpp
@@ -33,15 +33,17 @@
#include <utility>
#include <vector>
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
namespace android {
namespace nn {
static int compile(std::shared_ptr<Device> device, const ModelBuilder* model,
- int32_t executionPreference, sp<IPreparedModel>* preparedModel) {
+ int32_t executionPreference,
+ std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
nnAssert(device != nullptr); // nullptr indicates CPU
+ *preparedModel = nullptr;
// Compilation logic copied from ExecutionBuilder::startComputeOnDevice().
Model hidlModel;
model->setHidlModel(&hidlModel);
@@ -66,7 +68,9 @@
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- *preparedModel = preparedModelCallback->getPreparedModel();
+ if (auto returnedPreparedModel = preparedModelCallback->getPreparedModel()) {
+ *preparedModel = std::make_shared<VersionedIPreparedModel>(returnedPreparedModel);
+ }
if (prepareReturnStatus != ErrorStatus::NONE || *preparedModel == nullptr) {
LOG(ERROR) << "ExecutionPlan compilation on " << device->getName() << " failed:"
<< " prepareReturnStatus=" << toString(prepareReturnStatus)
diff --git a/runtime/ExecutionPlan.h b/runtime/ExecutionPlan.h
index a2d018c..518ec29 100644
--- a/runtime/ExecutionPlan.h
+++ b/runtime/ExecutionPlan.h
@@ -24,6 +24,7 @@
#include "ModelBuilder.h"
#include "NeuralNetworks.h"
#include "Utils.h"
+#include "VersionedInterfaces.h"
#include <set>
@@ -87,7 +88,9 @@
std::shared_ptr<Device> getDevice() const { return mDevice; }
// only available after calling finishSubModel()
- sp<IPreparedModel> getPreparedSubModel() const { return mPreparedSubModel; }
+ std::shared_ptr<VersionedIPreparedModel> getPreparedSubModel() const {
+ return mPreparedSubModel;
+ }
// Map inputs and outputs from ExecutionBuilder to StepExecutor.
void mapInputsAndOutputs(std::shared_ptr<StepExecutor> stepExecutor) const;
@@ -105,7 +108,7 @@
uint32_t mIndex; // index of step within plan
ModelBuilder mSubModel;
std::shared_ptr<Device> mDevice; // nullptr signifies CPU
- sp<IPreparedModel> mPreparedSubModel; // not used for CPU
+ std::shared_ptr<VersionedIPreparedModel> mPreparedSubModel; // not used for CPU
// Inputs of original model that are also inputs of this submodel:
// (fromModel index, subModel index)
@@ -243,7 +246,7 @@
std::shared_ptr<Device> mDevice; // nullptr signifies CPU
const ModelBuilder* mModel;
- sp<IPreparedModel> mPreparedModel; // not used for CPU
+ std::shared_ptr<VersionedIPreparedModel> mPreparedModel; // not used for CPU
};
struct CompoundBody : Body {
diff --git a/runtime/Manager.h b/runtime/Manager.h
index 36efd7c..8ae97d9 100644
--- a/runtime/Manager.h
+++ b/runtime/Manager.h
@@ -19,7 +19,7 @@
#include "HalInterfaces.h"
#include "Utils.h"
-#include "VersionedIDevice.h"
+#include "VersionedInterfaces.h"
#include <android-base/macros.h>
#include <map>
diff --git a/runtime/VersionedIDevice.cpp b/runtime/VersionedInterfaces.cpp
similarity index 76%
rename from runtime/VersionedIDevice.cpp
rename to runtime/VersionedInterfaces.cpp
index 4d47f13..38c1573 100644
--- a/runtime/VersionedIDevice.cpp
+++ b/runtime/VersionedInterfaces.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "VersionedIDevice.h"
+#include "VersionedInterfaces.h"
#include "Tracing.h"
#include "Utils.h"
@@ -24,11 +24,41 @@
namespace android {
namespace nn {
+ErrorStatus VersionedIPreparedModel::execute(const Request& request,
+ const sp<IExecutionCallback>& callback) {
+ if (mPreparedModelV1_2 != nullptr) {
+ Return<ErrorStatus> ret = mPreparedModelV1_2->execute_1_2(request, callback);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "execute_1_2 failure: " << ret.description();
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return static_cast<ErrorStatus>(ret);
+ } else if (mPreparedModelV1_0 != nullptr) {
+ Return<ErrorStatus> ret = mPreparedModelV1_0->execute(request, callback);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "execute failure: " << ret.description();
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return static_cast<ErrorStatus>(ret);
+ } else {
+ LOG(ERROR) << "execute called with no preparedModel";
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+}
+
+bool VersionedIPreparedModel::operator==(nullptr_t) const {
+ return mPreparedModelV1_0 == nullptr;
+}
+
+bool VersionedIPreparedModel::operator!=(nullptr_t) const {
+ return mPreparedModelV1_0 != nullptr;
+}
+
// HIDL guarantees all V1_1 interfaces inherit from their corresponding V1_0 interfaces.
-VersionedIDevice::VersionedIDevice(sp<V1_0::IDevice> device) :
- mDeviceV1_0(device),
- mDeviceV1_1(V1_1::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)),
- mDeviceV1_2(V1_2::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)) {}
+VersionedIDevice::VersionedIDevice(sp<V1_0::IDevice> device)
+ : mDeviceV1_0(device),
+ mDeviceV1_1(V1_1::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)),
+ mDeviceV1_2(V1_2::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)) {}
std::pair<ErrorStatus, Capabilities> VersionedIDevice::getCapabilities() {
std::pair<ErrorStatus, Capabilities> result;
@@ -36,9 +66,9 @@
if (mDeviceV1_1 != nullptr) {
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_1");
Return<void> ret = mDeviceV1_1->getCapabilities_1_1(
- [&result](ErrorStatus error, const Capabilities& capabilities) {
- result = std::make_pair(error, capabilities);
- });
+ [&result](ErrorStatus error, const Capabilities& capabilities) {
+ result = std::make_pair(error, capabilities);
+ });
if (!ret.isOk()) {
LOG(ERROR) << "getCapabilities_1_1 failure: " << ret.description();
return {ErrorStatus::GENERAL_FAILURE, {}};
@@ -46,10 +76,10 @@
} else if (mDeviceV1_0 != nullptr) {
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities");
Return<void> ret = mDeviceV1_0->getCapabilities(
- [&result](ErrorStatus error, const V1_0::Capabilities& capabilities) {
- // Time taken to convert capabilities is trivial
- result = std::make_pair(error, convertToV1_1(capabilities));
- });
+ [&result](ErrorStatus error, const V1_0::Capabilities& capabilities) {
+ // Time taken to convert capabilities is trivial
+ result = std::make_pair(error, convertToV1_1(capabilities));
+ });
if (!ret.isOk()) {
LOG(ERROR) << "getCapabilities failure: " << ret.description();
return {ErrorStatus::GENERAL_FAILURE, {}};
@@ -69,9 +99,9 @@
if (mDeviceV1_2 != nullptr) {
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_2");
Return<void> ret = mDeviceV1_2->getSupportedOperations_1_2(
- model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(error, supported);
- });
+ model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
+ result = std::make_pair(error, supported);
+ });
if (!ret.isOk()) {
LOG(ERROR) << "getSupportedOperations_1_2 failure: " << ret.description();
return {ErrorStatus::GENERAL_FAILURE, {}};
@@ -80,9 +110,9 @@
V1_1::Model model11 = convertToV1_1(model);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_1");
Return<void> ret = mDeviceV1_1->getSupportedOperations_1_1(
- model11, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(error, supported);
- });
+ model11, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
+ result = std::make_pair(error, supported);
+ });
if (!ret.isOk()) {
LOG(ERROR) << "getSupportedOperations_1_1 failure: " << ret.description();
return {ErrorStatus::GENERAL_FAILURE, {}};
@@ -91,9 +121,9 @@
V1_0::Model model10 = convertToV1_0(model);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_0");
Return<void> ret = mDeviceV1_0->getSupportedOperations(
- model10, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(error, supported);
- });
+ model10, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
+ result = std::make_pair(error, supported);
+ });
if (!ret.isOk()) {
LOG(ERROR) << "getSupportedOperations failure: " << ret.description();
return {ErrorStatus::GENERAL_FAILURE, {}};
@@ -226,11 +256,11 @@
}
}
-bool VersionedIDevice::operator==(nullptr_t) {
+bool VersionedIDevice::operator==(nullptr_t) const {
return mDeviceV1_0 == nullptr;
}
-bool VersionedIDevice::operator!=(nullptr_t) {
+bool VersionedIDevice::operator!=(nullptr_t) const {
return mDeviceV1_0 != nullptr;
}
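One consequence of the dispatch above: for a driver that only serves V1_0::IPreparedModel, the castFrom in the constructor yields nullptr, so execute() quietly takes the V1_0 branch. An illustrative sketch; the function and parameter names are hypothetical, and the handles are assumed to come from such a 1.0-only driver:

    #include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
    #include "VersionedInterfaces.h"

    using ::android::sp;
    using ::android::nn::VersionedIPreparedModel;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_0::Request;
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
    namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;

    ErrorStatus executeOnLegacyDriver(const sp<V1_0::IPreparedModel>& legacyPreparedModel,
                                      const Request& request,
                                      const sp<V1_2::IExecutionCallback>& callback) {
        // castFrom() fails for a 1.0-only object, leaving mPreparedModelV1_2 null ...
        VersionedIPreparedModel versioned(legacyPreparedModel);
        // ... so execute() routes to V1_0::IPreparedModel::execute() rather than execute_1_2().
        return versioned.execute(request, callback);
    }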
diff --git a/runtime/VersionedIDevice.h b/runtime/VersionedInterfaces.h
similarity index 63%
rename from runtime/VersionedIDevice.h
rename to runtime/VersionedInterfaces.h
index d190063..919a9de 100644
--- a/runtime/VersionedIDevice.h
+++ b/runtime/VersionedInterfaces.h
@@ -14,12 +14,13 @@
* limitations under the License.
*/
-#ifndef ANDROID_ML_NN_RUNTIME_VERSIONED_IDEVICE_H
-#define ANDROID_ML_NN_RUNTIME_VERSIONED_IDEVICE_H
+#ifndef ANDROID_ML_NN_RUNTIME_VERSIONED_INTERFACES_H
+#define ANDROID_ML_NN_RUNTIME_VERSIONED_INTERFACES_H
#include "HalInterfaces.h"
#include <android-base/macros.h>
+#include <memory>
#include <string>
#include <tuple>
@@ -27,20 +28,24 @@
namespace nn {
/**
- * This class wraps an IDevice object of any version to abstract away version
- * differences. It allows the remainder of the runtime to always use the most
- * up-to-date version of all HIDL types. As such, any reference to a HIDL type
- * in the rest of the runtime will--by default--be the latest HIDL version.
+ * Each class (VersionedIDevice, VersionedIPreparedModel) wraps a HIDL interface
+ * of any version to abstract away version differences. It allows the remainder
+ * of the runtime to always use the most up-to-date version of all HIDL types.
+ * As such, any reference to a HIDL type in the rest of the runtime
+ * will--by default--be the latest HIDL version.
*
- * This class will attempt to call the latest version of each interface method
- * if possible. If the latest method is unavailable, the VersionedIDevice class
+ * Each class will attempt to call the latest version of each interface method
+ * if possible. If the latest method is unavailable, the versioned class
* will attempt to upcast the type (e.g., V1_1::Model to V1_0::Model), and
- * invoke the latest interface method possible. If the VersionedIDevice class
+ * invoke the latest interface method possible. If the versioned class
* fails to find a matching applicable function, it will return an error.
*/
+
+/** This class wraps an IDevice object of any version. */
class VersionedIDevice {
DISALLOW_IMPLICIT_CONSTRUCTORS(VersionedIDevice);
-public:
+
+ public:
/**
* Constructor for the VersionedIDevice object.
*
@@ -201,7 +206,7 @@
* @return bool true if V1_0::IDevice (which could be V1_1::IDevice) is
* valid, false otherwise.
*/
- bool operator!=(nullptr_t);
+ bool operator!=(nullptr_t) const;
/**
* Returns whether this handle to an IDevice object is valid or not.
@@ -209,9 +214,9 @@
* @return bool true if V1_0::IDevice (which could be V1_1::IDevice) is
* invalid, false otherwise.
*/
- bool operator==(nullptr_t);
+ bool operator==(nullptr_t) const;
-private:
+ private:
/**
* All versions of IDevice are necessary because the driver could be v1.0,
* v1.1, or a later version. All these pointers logically represent the same
@@ -237,7 +242,113 @@
sp<V1_2::IDevice> mDeviceV1_2;
};
+/** This class wraps an IPreparedModel object of any version. */
+class VersionedIPreparedModel {
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VersionedIPreparedModel);
+
+ public:
+ /**
+ * Constructor for the VersionedIPreparedModel object.
+ *
+ * VersionedIPreparedModel is constructed with the V1_0::IPreparedModel object, which
+ * represents a prepared model that is at least v1.0 of the interface. The constructor
+ * downcasts to the latest version of the IPreparedModel interface, and will default to
+ * using the latest version of all IPreparedModel interface methods automatically.
+ *
+ * @param preparedModel A prepared model object that is at least version 1.0 of the
+ * IPreparedModel interface.
+ */
+ VersionedIPreparedModel(sp<V1_0::IPreparedModel> preparedModel)
+ : mPreparedModelV1_0(preparedModel),
+ mPreparedModelV1_2(
+ V1_2::IPreparedModel::castFrom(mPreparedModelV1_0).withDefault(nullptr)) {}
+
+ /**
+ * Launches an asynchronous execution on a prepared model.
+ *
+ * The execution is performed asynchronously with respect to the caller.
+ * execute must verify the inputs to the function are correct. If there is
+ * an error, execute must immediately invoke the callback with the
+ * appropriate ErrorStatus value, then return with the same ErrorStatus. If
+ * the inputs to the function are valid and there is no error, execute must
+ * launch an asynchronous task to perform the execution in the background,
+ * and immediately return with ErrorStatus::NONE. If the asynchronous task
+ * fails to launch, execute must immediately invoke the callback with
+ * ErrorStatus::GENERAL_FAILURE, then return with
+ * ErrorStatus::GENERAL_FAILURE.
+ *
+ * When the asynchronous task has finished its execution, it must
+ * immediately invoke the callback object provided as an input to the
+ * execute function. This callback must be provided with the ErrorStatus of
+ * the execution.
+ *
+ * If the prepared model was prepared from a model wherein all
+ * tensor operands have fully specified dimensions, and the inputs
+ * to the function are valid, then the execution should launch
+ * and complete successfully (ErrorStatus::NONE). There must be
+ * no failure unless the device itself is in a bad state.
+ *
+ * Multiple threads can call the execute function on the same IPreparedModel
+ * object concurrently with different requests.
+ *
+ * @param request The input and output information on which the prepared
+ * model is to be executed.
+ * @param callback A callback object used to return the error status of
+ * the execution. The callback object's notify function must
+ * be called exactly once, even if the execution was
+ * unsuccessful.
+ * @return status Error status of the call, must be:
+ * - NONE if task is successfully launched
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+ * not large enough to store the resultant values
+ * - INVALID_ARGUMENT if one of the input arguments is
+ * invalid
+ */
+ ErrorStatus execute(const Request& request, const sp<IExecutionCallback>& callback);
+
+ /**
+ * Returns whether this handle to an IPreparedModel object is valid or not.
+ *
+ * @return bool true if V1_0::IPreparedModel (which could be V1_2::IPreparedModel) is
+ * valid, false otherwise.
+ */
+ bool operator!=(nullptr_t) const;
+
+ /**
+ * Returns whether this handle to an IPreparedModel object is valid or not.
+ *
+ * @return bool true if V1_0::IPreparedModel (which could be V1_2::IPreparedModel) is
+ * invalid, false otherwise.
+ */
+ bool operator==(nullptr_t) const;
+
+ private:
+ /**
+ * All versions of IPreparedModel are necessary because the preparedModel could be v1.0,
+ * v1.2, or a later version. All these pointers logically represent the same object.
+ *
+ * The general strategy is: HIDL returns a V1_0 prepared model object, which
+ * (if not nullptr) could be v1.0, v1.2, or a greater version. The V1_0
+ * object is then "dynamically cast" to a V1_2 object. If successful,
+ * mPreparedModelV1_2 will point to the same object as mPreparedModelV1_0; otherwise,
+ * mPreparedModelV1_2 will be nullptr.
+ *
+ * In general:
+ * * If the prepared model is truly v1.0, mPreparedModelV1_0 will point to a valid object
+ * and mPreparedModelV1_2 will be nullptr.
+ * * If the prepared model is truly v1.2 or later, both mPreparedModelV1_0 and
+ * mPreparedModelV1_2 will point to the same valid object.
+ *
+ * Idiomatic usage: if mPreparedModelV1_2 is non-null, do V1_2 dispatch; otherwise,
+ * do V1_0 dispatch.
+ */
+ sp<V1_0::IPreparedModel> mPreparedModelV1_0;
+ sp<V1_2::IPreparedModel> mPreparedModelV1_2;
+};
+
} // namespace nn
} // namespace android
-#endif // ANDROID_ML_NN_RUNTIME_VERSIONED_IDEVICE_H
+#endif // ANDROID_ML_NN_RUNTIME_VERSIONED_INTERFACES_H
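The header documents the same strategy for both wrappers; the castFrom().withDefault(nullptr) chain in the constructors decides which member pointers are populated. A sketch of what that means for a concrete driver; obtaining the handle via getService and the "sample-all" instance name are assumptions for the example (DeviceManager owns this lookup in the real runtime):

    #include <android/hardware/neuralnetworks/1.0/IDevice.h>
    #include "VersionedInterfaces.h"

    using ::android::sp;
    using ::android::nn::VersionedIDevice;
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    void probeDriver() {
        sp<V1_0::IDevice> base = V1_0::IDevice::getService("sample-all");  // assumed instance name
        if (base == nullptr) return;
        VersionedIDevice device(base);
        // 1.0-only driver: mDeviceV1_1 and mDeviceV1_2 stay nullptr, so calls fall back to
        // the V1_0 methods, converting Model and Capabilities where needed.
        // 1.2 driver: all three members alias the same object and the *_1_2 methods win.
        auto capabilities = device.getCapabilities();  // {ErrorStatus, Capabilities}
        (void)capabilities;
    }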
diff --git a/runtime/test/TestExecution.cpp b/runtime/test/TestExecution.cpp
index 0bf4665..ef34524 100644
--- a/runtime/test/TestExecution.cpp
+++ b/runtime/test/TestExecution.cpp
@@ -37,7 +37,7 @@
using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
using HidlModel = hardware::neuralnetworks::V1_2::Model;
-using PreparedModelCallback = hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using PreparedModelCallback = hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
using WrapperCompilation = nn::test_wrapper::Compilation;
@@ -49,41 +49,72 @@
namespace {
-// Wraps an IPreparedModel to allow dummying up the execution status.
-class TestPreparedModel : public IPreparedModel {
-public:
+// Wraps a V1_2::IPreparedModel to allow dummying up the execution status.
+class TestPreparedModel12 : public V1_2::IPreparedModel {
+ public:
// If errorStatus is NONE, then execute behaves normally (and sends back
// the actual execution status). Otherwise, don't bother to execute, and
// just send back errorStatus (as the execution status, not the launch
// status).
- TestPreparedModel(sp<IPreparedModel> preparedModel, ErrorStatus errorStatus) :
- mPreparedModel(preparedModel), mErrorStatus(errorStatus) {}
+ TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ : mPreparedModelV1_0(preparedModel),
+ mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
+ mErrorStatus(errorStatus) {}
Return<ErrorStatus> execute(const Request& request,
- const sp<IExecutionCallback>& callback) override {
+ const sp<V1_0::IExecutionCallback>& callback) override {
+ CHECK(mPreparedModelV1_0 != nullptr) << "V1_0 prepared model is nullptr.";
if (mErrorStatus == ErrorStatus::NONE) {
- return mPreparedModel->execute(request, callback);
+ return mPreparedModelV1_0->execute(request, callback);
} else {
callback->notify(mErrorStatus);
return ErrorStatus::NONE;
}
}
-private:
- sp<IPreparedModel> mPreparedModel;
+
+ Return<ErrorStatus> execute_1_2(const Request& request,
+ const sp<V1_2::IExecutionCallback>& callback) override {
+ CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
+ if (mErrorStatus == ErrorStatus::NONE) {
+ return mPreparedModelV1_2->execute_1_2(request, callback);
+ } else {
+ callback->notify_1_2(mErrorStatus);
+ return ErrorStatus::NONE;
+ }
+ }
+
+ private:
+ sp<V1_0::IPreparedModel> mPreparedModelV1_0;
+ sp<V1_2::IPreparedModel> mPreparedModelV1_2;
ErrorStatus mErrorStatus;
};
+// Like TestPreparedModel12, but implementing 1.0
+class TestPreparedModel10 : public V1_0::IPreparedModel {
+ public:
+ TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ : m12PreparedModel(preparedModel, errorStatus) {}
+
+ Return<ErrorStatus> execute(const Request& request,
+ const sp<V1_0::IExecutionCallback>& callback) override {
+ return m12PreparedModel.execute(request, callback);
+ }
+
+ private:
+ TestPreparedModel12 m12PreparedModel;
+};
+
// Behaves like SampleDriver, except that it produces wrapped IPreparedModel.
-class TestDriver11 : public SampleDriver {
-public:
+class TestDriver12 : public SampleDriver {
+ public:
// Allow dummying up the error status for execution of all models
// prepared from this driver. If errorStatus is NONE, then
// execute behaves normally (and sends back the actual execution
// status). Otherwise, don't bother to execute, and just send
// back errorStatus (as the execution status, not the launch
// status).
- TestDriver11(const std::string& name, ErrorStatus errorStatus) :
- SampleDriver(name.c_str()), mErrorStatus(errorStatus) { }
+ TestDriver12(const std::string& name, ErrorStatus errorStatus)
+ : SampleDriver(name.c_str()), mErrorStatus(errorStatus) {}
Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
android::nn::initVLogMask();
@@ -108,10 +139,8 @@
}
Return<ErrorStatus> prepareModel_1_2(
- const HidlModel& model,
- ExecutionPreference preference,
- const sp<IPreparedModelCallback>& actualCallback) override {
-
+ const HidlModel& model, ExecutionPreference preference,
+ const sp<IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
Return<ErrorStatus> prepareModelReturn =
SampleDriver::prepareModel_1_2(model, preference, localCallback);
@@ -119,6 +148,34 @@
return prepareModelReturn;
}
if (prepareModelReturn != ErrorStatus::NONE) {
+ actualCallback->notify_1_2(
+ localCallback->getStatus(),
+ V1_2::IPreparedModel::castFrom(localCallback->getPreparedModel()));
+ return prepareModelReturn;
+ }
+ localCallback->wait();
+ if (localCallback->getStatus() != ErrorStatus::NONE) {
+ actualCallback->notify_1_2(
+ localCallback->getStatus(),
+ V1_2::IPreparedModel::castFrom(localCallback->getPreparedModel()));
+ } else {
+ actualCallback->notify_1_2(
+ ErrorStatus::NONE,
+ new TestPreparedModel12(localCallback->getPreparedModel(), mErrorStatus));
+ }
+ return prepareModelReturn;
+ }
+
+ Return<ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareModelReturn =
+ SampleDriver::prepareModel_1_1(model, preference, localCallback);
+ if (!prepareModelReturn.isOkUnchecked()) {
+ return prepareModelReturn;
+ }
+ if (prepareModelReturn != ErrorStatus::NONE) {
actualCallback->notify(localCallback->getStatus(), localCallback->getPreparedModel());
return prepareModelReturn;
}
@@ -126,39 +183,78 @@
if (localCallback->getStatus() != ErrorStatus::NONE) {
actualCallback->notify(localCallback->getStatus(), localCallback->getPreparedModel());
} else {
- actualCallback->notify(ErrorStatus::NONE,
- new TestPreparedModel(localCallback->getPreparedModel(),
- mErrorStatus));
+ actualCallback->notify(
+ ErrorStatus::NONE,
+ new TestPreparedModel10(localCallback->getPreparedModel(), mErrorStatus));
}
return prepareModelReturn;
}
-private:
+ Return<ErrorStatus> prepareModel(
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
+ actualCallback);
+ }
+
+ private:
ErrorStatus mErrorStatus;
};
-// Like TestDriver, but implementing 1.0
-// TODO: Add TestDriver11.
-class TestDriver10 : public V1_0::IDevice {
-public:
- TestDriver10(const std::string& name, ErrorStatus errorStatus) : m11Driver(name, errorStatus) {}
+// Like TestDriver12, but implementing 1.1
+class TestDriver11 : public V1_1::IDevice {
+ public:
+ TestDriver11(const std::string& name, ErrorStatus errorStatus) : m12Driver(name, errorStatus) {}
+ Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ return m12Driver.getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb _hidl_cb) override {
+ return m12Driver.getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return m12Driver.prepareModel_1_1(model, preference, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return m12Driver.getStatus(); }
Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return m11Driver.getCapabilities(_hidl_cb);
+ return m12Driver.getCapabilities(_hidl_cb);
}
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb _hidl_cb) override {
- return m11Driver.getSupportedOperations(model, _hidl_cb);
+ return m12Driver.getSupportedOperations(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel(
- const V1_0::Model& model,
- const sp<IPreparedModelCallback>& actualCallback) override {
- return m11Driver.prepareModel(model, actualCallback);
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return m12Driver.prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override {
- return m11Driver.getStatus();
+
+ private:
+ TestDriver12 m12Driver;
+};
+
+// Like TestDriver12, but implementing 1.0
+class TestDriver10 : public V1_0::IDevice {
+ public:
+ TestDriver10(const std::string& name, ErrorStatus errorStatus) : m12Driver(name, errorStatus) {}
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return m12Driver.getCapabilities(_hidl_cb);
}
-private:
- TestDriver11 m11Driver;
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return m12Driver.getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel(
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return m12Driver.prepareModel(model, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return m12Driver.getStatus(); }
+
+ private:
+ TestDriver12 m12Driver;
};
// This class adds some simple utilities on top of WrapperCompilation in order
@@ -289,6 +385,12 @@
std::make_tuple(ErrorStatus::INVALID_ARGUMENT,
Result::BAD_DATA));
+class ExecutionTest12 : public ExecutionTestTemplate<TestDriver12> {};
+TEST_P(ExecutionTest12, Wait) {
+ TestWait();
+}
+INSTANTIATE_TEST_CASE_P(Flavor, ExecutionTest12, kTestValues);
+
class ExecutionTest11 : public ExecutionTestTemplate<TestDriver11> {};
TEST_P(ExecutionTest11, Wait) {
TestWait();
diff --git a/runtime/test/TestPartitioning.cpp b/runtime/test/TestPartitioning.cpp
index d0a53bc..37804d3 100644
--- a/runtime/test/TestPartitioning.cpp
+++ b/runtime/test/TestPartitioning.cpp
@@ -209,10 +209,12 @@
// Dummy class -- a prepared model must not be nullptr.
class PartitioningPreparedModel : public IPreparedModel {
public:
- Return<ErrorStatus> execute(const Request&,
- const sp<IExecutionCallback>&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
- }
+ Return<ErrorStatus> execute(const Request&, const sp<V1_0::IExecutionCallback>&) override {
+ return ErrorStatus::DEVICE_UNAVAILABLE;
+ }
+ Return<ErrorStatus> execute_1_2(const Request&, const sp<V1_2::IExecutionCallback>&) override {
+ return ErrorStatus::DEVICE_UNAVAILABLE;
+ }
};
public:
enum OEM {
@@ -238,7 +240,7 @@
}
}
}
- cb->notify(status, new PartitioningPreparedModel);
+ cb->notify_1_2(status, new PartitioningPreparedModel);
return status;
}
diff --git a/runtime/test/TestPartitioningRandom.cpp b/runtime/test/TestPartitioningRandom.cpp
index dcbc7a5..42f6e37 100644
--- a/runtime/test/TestPartitioningRandom.cpp
+++ b/runtime/test/TestPartitioningRandom.cpp
@@ -518,7 +518,7 @@
if (ret.isOk() && (outStatus == ErrorStatus::NONE)) {
return SampleDriver::prepareModel_1_2(model, preference, callback);
} else {
- callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
+ callback->notify_1_2(ErrorStatus::INVALID_ARGUMENT, nullptr);
return ErrorStatus::INVALID_ARGUMENT;
}
}