Migrate NNAPI runtime to canonical types
This change replaces most uses of HAL types in the codebase with
equivalent canonical types. Later changes will introduce more
refactorings.
Also removes unused files nn/runtime/test/Bridge.{h,cpp}.
Bug: 160669906
Fix: 155923931
Test: NeuralNetworksTest_static (all 7 passes)
Test: NeuralNetworksTest_operations
Test: NeuralNetworksTest_utils
Test: NeuralNetworksTest_logtag
Test: nnCache_test
Test: BlobCache_test
Change-Id: I63fa286e926a096948f1b1b172d1d562c4f52f29
Merged-In: I63fa286e926a096948f1b1b172d1d562c4f52f29
(cherry picked from commit daa4b515bc15a2ac7755f0666c023d7e3caa951a)
diff --git a/runtime/Callbacks.cpp b/runtime/Callbacks.cpp
index 6a81b9c..d31098c 100644
--- a/runtime/Callbacks.cpp
+++ b/runtime/Callbacks.cpp
@@ -18,28 +18,25 @@
#include "Callbacks.h"
+#include <Utils.h>
#include <android-base/logging.h>
+
#include <limits>
#include <utility>
#include <vector>
namespace android::nn {
-using namespace hal;
-
-constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
- .timeInDriver = std::numeric_limits<uint64_t>::max()};
-
// PreparedModelCallback methods begin here
-Return<void> PreparedModelCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus,
- const sp<V1_0::IPreparedModel>& preparedModel) {
+hardware::Return<void> PreparedModelCallback::notifyInternal(
+ bool deadObject, ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) {
{
std::lock_guard<std::mutex> hold(mMutex);
// quick-return if object has already been notified
if (mNotified) {
- return Void();
+ return hardware::Void();
}
// store results and mark as notified
@@ -50,22 +47,22 @@
}
mCondition.notify_all();
- return Void();
+ return hardware::Void();
}
-Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus,
- const sp<V1_0::IPreparedModel>& preparedModel) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel);
+hardware::Return<void> PreparedModelCallback::notify(
+ V1_0::ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel);
}
-Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
- const sp<V1_2::IPreparedModel>& preparedModel) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel);
+hardware::Return<void> PreparedModelCallback::notify_1_2(
+ V1_0::ErrorStatus errorStatus, const sp<V1_2::IPreparedModel>& preparedModel) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel);
}
-Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
- const sp<V1_3::IPreparedModel>& preparedModel) {
- return notifyInternal(false, errorStatus, preparedModel);
+hardware::Return<void> PreparedModelCallback::notify_1_3(
+ V1_3::ErrorStatus errorStatus, const sp<V1_3::IPreparedModel>& preparedModel) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel);
}
void PreparedModelCallback::notifyAsDeadObject() {
@@ -94,24 +91,26 @@
// ExecutionCallback methods begin here
-Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), {}, kNoTiming);
+hardware::Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), {}, {});
}
-Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
- const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), outputShapes, timing);
+hardware::Return<void> ExecutionCallback::notify_1_2(
+ V1_0::ErrorStatus errorStatus, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
}
-Return<void> ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
- const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- return notifyInternal(false, errorStatus, outputShapes, timing);
+hardware::Return<void> ExecutionCallback::notify_1_3(
+ V1_3::ErrorStatus errorStatus, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
}
void ExecutionCallback::notifyAsDeadObject() {
- notifyInternal(true, ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ notifyInternal(true, ErrorStatus::GENERAL_FAILURE, {}, {});
}
void ExecutionCallback::wait() const {
@@ -199,9 +198,9 @@
mOnFinish = finish;
}
-Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- Timing timing) {
+hardware::Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus,
+ std::vector<OutputShape> outputShapes,
+ Timing timing) {
// check results
if (!deadObject) {
if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
@@ -211,7 +210,7 @@
<< "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
errorStatus = ErrorStatus::GENERAL_FAILURE;
outputShapes = {};
- timing = kNoTiming;
+ timing = {};
}
} else if (errorStatus != ErrorStatus::NONE) {
// outputShapes must be empty if errorStatus is neither NONE nor
@@ -221,7 +220,7 @@
"neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
errorStatus = ErrorStatus::GENERAL_FAILURE;
outputShapes = {};
- timing = kNoTiming;
+ timing = {};
}
}
}
@@ -232,7 +231,7 @@
// quick-return if object has already been notified
if (mNotified) {
- return Void();
+ return hardware::Void();
}
mDeadObject = deadObject;
@@ -250,7 +249,7 @@
}
}
mCondition.notify_all();
- return Void();
+ return hardware::Void();
}
} // namespace android::nn
diff --git a/runtime/Callbacks.h b/runtime/Callbacks.h
index 7537025..66408ce 100644
--- a/runtime/Callbacks.h
+++ b/runtime/Callbacks.h
@@ -17,9 +17,11 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H
-#include "HalInterfaces.h"
-
+#include <HalInterfaces.h>
+#include <Utils.h>
#include <android-base/thread_annotations.h>
+#include <nnapi/Types.h>
+
#include <condition_variable>
#include <functional>
#include <mutex>
@@ -60,7 +62,7 @@
*
* This callback object is passed as an argument to IDevice::prepareModel*.
*/
-class PreparedModelCallback : public hal::IPreparedModelCallback {
+class PreparedModelCallback : public V1_3::IPreparedModelCallback {
public:
/**
* IPreparedModelCallback::notify marks the callback object with the return
@@ -85,8 +87,8 @@
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- hal::Return<void> notify(hal::V1_0::ErrorStatus status,
- const sp<hal::V1_0::IPreparedModel>& preparedModel) override;
+ hardware::Return<void> notify(V1_0::ErrorStatus status,
+ const sp<V1_0::IPreparedModel>& preparedModel) override;
/**
* IPreparedModelCallback::notify_1_2 marks the callback object with the
@@ -111,8 +113,8 @@
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status,
- const sp<hal::V1_2::IPreparedModel>& preparedModel) override;
+ hardware::Return<void> notify_1_2(V1_0::ErrorStatus status,
+ const sp<V1_2::IPreparedModel>& preparedModel) override;
/**
* IPreparedModelCallback::notify_1_3 marks the callback object with the
@@ -139,8 +141,8 @@
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status,
- const sp<hal::V1_3::IPreparedModel>& preparedModel) override;
+ hardware::Return<void> notify_1_3(V1_3::ErrorStatus status,
+ const sp<V1_3::IPreparedModel>& preparedModel) override;
/**
* Mark the callback object as a dead object. This acts as a call to notify.
@@ -169,7 +171,7 @@
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
* - DEAD_OBJECT if the driver crashed without returning a result
*/
- hal::V1_3::ErrorStatus getStatus() const;
+ ErrorStatus getStatus() const;
/**
* Retrieves the model that has been prepared for execution from the
@@ -181,7 +183,7 @@
* @return preparedModel Returned model that has been prepared for
* execution, nullptr if the model was unable to be prepared.
*/
- sp<hal::V1_0::IPreparedModel> getPreparedModel() const;
+ sp<V1_0::IPreparedModel> getPreparedModel() const;
/**
* Queries whether the object is dead.
@@ -191,15 +193,15 @@
bool isDeadObject() const;
private:
- hal::Return<void> notifyInternal(bool deadObject, hal::ErrorStatus errorStatus,
- const sp<hal::V1_0::IPreparedModel>& preparedModel);
+ hardware::Return<void> notifyInternal(bool deadObject, ErrorStatus errorStatus,
+ const sp<V1_0::IPreparedModel>& preparedModel);
mutable std::mutex mMutex;
mutable std::condition_variable mCondition;
bool mNotified GUARDED_BY(mMutex) = false;
bool mDeadObject = false;
- hal::ErrorStatus mErrorStatus = hal::ErrorStatus::GENERAL_FAILURE;
- sp<hal::V1_0::IPreparedModel> mPreparedModel;
+ ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ sp<V1_0::IPreparedModel> mPreparedModel;
};
/**
@@ -216,9 +218,9 @@
*
* This callback object is passed as an argument to IPreparedModel::execute*.
*/
-class ExecutionCallback : public hal::IExecutionCallback {
+class ExecutionCallback : public V1_3::IExecutionCallback {
using ExecutionFinish =
- std::function<hal::ErrorStatus(hal::ErrorStatus, const std::vector<hal::OutputShape>&)>;
+ std::function<ErrorStatus(ErrorStatus, const std::vector<OutputShape>&)>;
public:
/**
@@ -244,7 +246,7 @@
* enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
- hal::Return<void> notify(hal::V1_0::ErrorStatus status) override;
+ hardware::Return<void> notify(V1_0::ErrorStatus status) override;
/**
* IExecutionCallback::notify_1_2 marks the callback object with the results
@@ -279,9 +281,9 @@
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
- hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes,
- const hal::Timing& timing) override;
+ hardware::Return<void> notify_1_2(V1_0::ErrorStatus status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) override;
/**
* IExecutionCallback::notify_1_3 marks the callback object with the results
@@ -318,15 +320,15 @@
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
- hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes,
- const hal::Timing& timing) override;
+ hardware::Return<void> notify_1_3(V1_3::ErrorStatus status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) override;
// An overload of the latest notify interface to hide the version from ExecutionBuilder.
- hal::Return<void> notify(hal::V1_3::ErrorStatus status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes,
- const hal::Timing& timing) {
- return notify_1_3(status, outputShapes, timing);
+ hardware::Return<void> notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes,
+ const Timing& timing) {
+ return notify_1_3(convertToV1_3(status), convertToV1_2(outputShapes),
+ convertToV1_2(timing));
}
/**
@@ -362,7 +364,7 @@
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
* - DEAD_OBJECT if the driver crashed without returning a result
*/
- hal::V1_3::ErrorStatus getStatus() const;
+ ErrorStatus getStatus() const;
/**
* Retrieves the output shapes returned from the asynchronous task launched
@@ -385,7 +387,7 @@
* OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
* at least one output operand that is not fully-specified.
*/
- const std::vector<hal::OutputShape>& getOutputShapes() const;
+ const std::vector<OutputShape>& getOutputShapes() const;
/**
* Retrieves the duration of execution of the asynchronous task launched by
@@ -400,7 +402,7 @@
* @return timing Duration of the execution. Every time must be UINT64_MAX
* unless the status is NONE.
*/
- hal::Timing getTiming() const;
+ Timing getTiming() const;
/**
* ExecutionCallback::bindThread binds a thread to the ExecutionCallback
@@ -461,9 +463,8 @@
* before any call to wait or get* return. It then enables all prior and
* future wait calls on the ExecutionCallback object to proceed.
*/
- hal::Return<void> notifyInternal(bool deadObject, hal::ErrorStatus errorStatus,
- std::vector<hal::OutputShape> outputShapes,
- hal::Timing timing);
+ hardware::Return<void> notifyInternal(bool deadObject, ErrorStatus errorStatus,
+ std::vector<OutputShape> outputShapes, Timing timing);
// members
mutable std::mutex mMutex;
@@ -472,9 +473,9 @@
ExecutionFinish mOnFinish GUARDED_BY(mMutex);
bool mNotified GUARDED_BY(mMutex) = false;
bool mDeadObject = false;
- hal::ErrorStatus mErrorStatus = hal::ErrorStatus::GENERAL_FAILURE;
- std::vector<hal::OutputShape> mOutputShapes;
- hal::Timing mTiming = {};
+ ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ std::vector<OutputShape> mOutputShapes;
+ Timing mTiming = {};
};
} // namespace android::nn
diff --git a/runtime/CompilationBuilder.cpp b/runtime/CompilationBuilder.cpp
index 051ac88..5d2d5db 100644
--- a/runtime/CompilationBuilder.cpp
+++ b/runtime/CompilationBuilder.cpp
@@ -36,8 +36,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
CompilationBuilder::CompilationBuilder(const ModelBuilder* model,
const std::vector<std::shared_ptr<Device>>& devices,
bool explicitDeviceList)
diff --git a/runtime/Event.h b/runtime/Event.h
index 982381a..41b9a28 100644
--- a/runtime/Event.h
+++ b/runtime/Event.h
@@ -28,7 +28,7 @@
public:
virtual ~IEvent() = default;
virtual void wait() const = 0;
- virtual hal::ErrorStatus getStatus() const = 0;
+ virtual ErrorStatus getStatus() const = 0;
virtual int getSyncFenceFd(bool shouldDup) const = 0;
};
@@ -40,7 +40,7 @@
}
void wait() const override { kExecutionCallback->wait(); }
- hal::ErrorStatus getStatus() const override { return kExecutionCallback->getStatus(); }
+ ErrorStatus getStatus() const override { return kExecutionCallback->getStatus(); }
// Always return -1 as this is not backed by a sync fence.
int getSyncFenceFd(bool /*should_dup*/) const override { return -1; }
@@ -51,7 +51,7 @@
// The SyncFenceEvent wraps sync fence and IFencedExecutionCallback
class SyncFenceEvent : public IEvent {
public:
- SyncFenceEvent(int sync_fence_fd, const sp<hal::IFencedExecutionCallback>& callback)
+ SyncFenceEvent(int sync_fence_fd, const sp<V1_3::IFencedExecutionCallback>& callback)
: kFencedExecutionCallback(callback) {
if (sync_fence_fd > 0) {
// Dup the provided file descriptor
@@ -69,18 +69,18 @@
// Get the status of the event.
// In case of syncWait error, query the dispatch callback for detailed
// error status.
- hal::ErrorStatus getStatus() const override {
- auto error = hal::ErrorStatus::NONE;
+ ErrorStatus getStatus() const override {
+ auto error = ErrorStatus::NONE;
if (mSyncFenceFd > 0 && syncWait(mSyncFenceFd, -1) != FenceState::SIGNALED) {
- error = hal::ErrorStatus::GENERAL_FAILURE;
+ error = ErrorStatus::GENERAL_FAILURE;
// If there is a callback available, use the callback to get the error code.
if (kFencedExecutionCallback != nullptr) {
- const hal::Return<void> ret = kFencedExecutionCallback->getExecutionInfo(
- [&error](hal::ErrorStatus status, hal::Timing, hal::Timing) {
- error = status;
+ const hardware::Return<void> ret = kFencedExecutionCallback->getExecutionInfo(
+ [&error](V1_3::ErrorStatus status, V1_2::Timing, V1_2::Timing) {
+ error = uncheckedConvert(status);
});
if (!ret.isOk()) {
- error = hal::ErrorStatus::GENERAL_FAILURE;
+ error = ErrorStatus::GENERAL_FAILURE;
}
}
}
@@ -102,7 +102,7 @@
private:
// TODO(b/148423931): used android::base::unique_fd instead.
int mSyncFenceFd = -1;
- const sp<hal::IFencedExecutionCallback> kFencedExecutionCallback;
+ const sp<V1_3::IFencedExecutionCallback> kFencedExecutionCallback;
};
} // namespace android::nn
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index 8b6b817..aaf2bbd 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -45,12 +45,10 @@
namespace android {
namespace nn {
-using namespace hal;
-
// Partial validation of output shapes returned from driver, to ensure they
// conform to a very specific set of rules.
static bool validateOutputShapesFromDriver(ErrorStatus executionStatus, const ModelBuilder* model,
- const std::vector<hal::OutputShape>& shapes) {
+ const std::vector<OutputShape>& shapes) {
// Enforces the following rules (some of which are from b/154054474):
// - shapes vector is empty except in the case of NONE or OUTPUT_INSUFFICIENT_SIZE.
// If the vector is not empty, it must have as many entries as the step model has outputs.
@@ -61,21 +59,21 @@
switch (executionStatus) {
case ErrorStatus::NONE: {
NN_RET_CHECK(shapes.size() == 0 || shapes.size() == model->outputCount())
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " output shapes vector must be empty or of length " << model->outputCount()
<< " but has length " << shapes.size();
NN_RET_CHECK(std::all_of(shapes.begin(), shapes.end(),
[](const OutputShape& shape) { return shape.isSufficient; }))
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " at least one output shape is unexpectedly marked !isSufficient";
const TypeManager* tm = TypeManager::get();
for (uint32_t outputIndex = 0, outputCount = shapes.size(); outputIndex < outputCount;
++outputIndex) {
- const hal::Operand& outputOperand = model->getOutputOperand(outputIndex);
+ const Operand& outputOperand = model->getOutputOperand(outputIndex);
NN_RET_CHECK(!tm->isTensorType(outputOperand.type) ||
(shapes[outputIndex].dimensions.size() != 0))
- << "With execution ErrorStatus " << toString(executionStatus) << " output#"
+ << "With execution ErrorStatus " << executionStatus << " output#"
<< outputIndex << " shape unexpectedly has zero rank";
}
@@ -83,18 +81,18 @@
}
case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: {
NN_RET_CHECK(shapes.size() == model->outputCount())
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " output shapes vector must be of length " << model->outputCount()
<< " but has length " << shapes.size();
NN_RET_CHECK(std::any_of(shapes.begin(), shapes.end(),
[](const OutputShape& shape) { return !shape.isSufficient; }))
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " at least one output shape must have been marked !isSufficient";
break;
}
default: {
NN_RET_CHECK(shapes.size() == 0)
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " output shapes vector must be empty but has length " << shapes.size();
break;
}
@@ -102,13 +100,11 @@
return true;
}
static bool validateOutputShapesFromDriver(int executionResultCode, const ModelBuilder* model,
- const std::vector<hal::OutputShape>& shapes) {
+ const std::vector<OutputShape>& shapes) {
return validateOutputShapesFromDriver(convertResultCodeToErrorStatus(executionResultCode),
model, shapes);
}
-const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-
static MeasureTiming measureTiming(const ExecutionBuilder* execution) {
return execution->measureTiming() ? MeasureTiming::YES : MeasureTiming::NO;
}
@@ -117,7 +113,7 @@
const char* tag, bool allowUnspecified) {
if (newType != nullptr) {
const Extension::OperandTypeInformation* info = nullptr;
- if (isExtensionOperandType(operand.type)) {
+ if (isExtension(operand.type)) {
NN_RET_CHECK(TypeManager::get()->getExtensionOperandTypeInfo(operand.type, &info));
}
if (validateOperandType(*newType, info, tag, allowUnspecified) !=
@@ -220,7 +216,8 @@
}
int ExecutionBuilder::setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length) {
+ const RuntimeMemory* memory, size_t offset,
+ size_t length) {
// Should be similar to StepExecutor::setInputOrOutputFromMemory()
if (mStarted) {
@@ -297,7 +294,8 @@
}
int ExecutionBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length) {
+ const RuntimeMemory* memory, size_t offset,
+ size_t length) {
// Should be similar to StepExecutor::setInputOrOutputFromMemory()
if (mStarted) {
@@ -383,12 +381,12 @@
Timing timingFenced = timingLaunched;
if (mFencedExecutionCallback != nullptr) {
ErrorStatus status;
- const Return<void> ret = mFencedExecutionCallback->getExecutionInfo(
- [&status, &timingLaunched, &timingFenced](ErrorStatus error, Timing tLaunched,
- Timing tFenced) {
- status = error;
- timingLaunched = tLaunched;
- timingFenced = tFenced;
+ const hardware::Return<void> ret = mFencedExecutionCallback->getExecutionInfo(
+ [&status, &timingLaunched, &timingFenced](
+ V1_3::ErrorStatus error, V1_2::Timing tLaunched, V1_2::Timing tFenced) {
+ status = uncheckedConvert(error);
+ timingLaunched = uncheckedConvert(tLaunched);
+ timingFenced = uncheckedConvert(tFenced);
});
if (!ret.isOk()) {
*duration = UINT64_MAX;
@@ -546,7 +544,7 @@
std::shared_ptr<StepExecutor> executor;
int n1 = plan.fallback(controller, &executor, nullptr, nullptr);
if (n1 != ANEURALNETWORKS_NO_ERROR) {
- return {n1, {}, kNoTiming, nullptr};
+ return {n1, {}, {}, nullptr};
}
CHECK(executor != nullptr);
@@ -565,7 +563,7 @@
VLOG(EXECUTION) << "ExecutionBuilder::compute (from plan, iteratively)";
std::vector<OutputShape> outputShapes = executionBuilder->getInitialOutputShapes();
- Timing timing = kNoTiming;
+ Timing timing;
// Disallow CPU fallback when the ExecutionPlan is simple on CPU.
allowCpuFallback &= !plan.isSimpleCpu();
@@ -589,7 +587,7 @@
bool missedDeadline = n == ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT ||
n == ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
if (allowCpuFallback && !missedDeadline) break;
- executionCallback->notify(convertResultCodeToErrorStatus(n), {}, kNoTiming);
+ executionCallback->notify(convertResultCodeToErrorStatus(n), {}, {});
return;
}
@@ -636,7 +634,7 @@
// - we didn't learn anything new about dynamic temporaries.
// Neither of these is recoverable, so end execution.
const ErrorStatus stepStatus = convertResultCodeToErrorStatus(stepN);
- executionCallback->notify(stepStatus, outputShapes, kNoTiming);
+ executionCallback->notify(stepStatus, outputShapes, {});
return;
}
// Every main model output is of sufficient size. This implies that
@@ -649,7 +647,7 @@
// If CPU fallback is not allowed and there was an error, end execution.
if (!allowCpuFallback) {
const ErrorStatus stepStatus = convertResultCodeToErrorStatus(stepN);
- executionCallback->notify(stepStatus, {}, kNoTiming);
+ executionCallback->notify(stepStatus, {}, {});
return;
}
@@ -658,7 +656,7 @@
// (2) return from the function with an error
if (executorIsCpu) {
if (!plan.isSimple()) break;
- executionCallback->notify(convertResultCodeToErrorStatus(stepN), {}, kNoTiming);
+ executionCallback->notify(convertResultCodeToErrorStatus(stepN), {}, {});
return;
}
@@ -706,7 +704,7 @@
// - we didn't learn anything new about dynamic temporaries.
// Neither of these is recoverable, so end execution.
const ErrorStatus fallbackStatus = convertResultCodeToErrorStatus(fallbackN);
- executionCallback->notify(fallbackStatus, outputShapes, kNoTiming);
+ executionCallback->notify(fallbackStatus, outputShapes, {});
return;
}
// Every main model output is of sufficient size. This implies
@@ -718,7 +716,7 @@
// Do not fallback twice if the ExecutionPlan is simple.
if (plan.isSimple()) {
const ErrorStatus fallbackStatus = convertResultCodeToErrorStatus(fallbackN);
- executionCallback->notify(fallbackStatus, {}, kNoTiming);
+ executionCallback->notify(fallbackStatus, {}, {});
return;
}
@@ -748,7 +746,7 @@
// fence and the fenced compute callback returned from the last partition.
// Any failed partition will result in the whole execution fallback to CPU if
// allowCpuFallback is set to true.
-static std::tuple<int, int, sp<hal::IFencedExecutionCallback>> startComputeFenced(
+static std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>> startComputeFenced(
ExecutionBuilder* executionBuilder, const ExecutionPlan& plan,
std::shared_ptr<ExecutionPlan::Controller> controller, const std::vector<int>& waitFor,
uint64_t timeoutDurationAfterFence, const std::optional<Deadline>& deadline,
@@ -773,7 +771,7 @@
// Initiate waitForFds, syncFence for the first step.
std::vector<int> waitForFds = waitFor;
int syncFence = -1;
- sp<hal::IFencedExecutionCallback> computeFencedCallback;
+ sp<V1_3::IFencedExecutionCallback> computeFencedCallback;
while (true) {
VLOG(EXECUTION) << "looking for next StepExecutor";
@@ -942,7 +940,7 @@
LOG(ERROR) << "ANeuralNetworksExecution_" << name() << " not all inputs specified";
return ANEURALNETWORKS_BAD_DATA;
} else if (p.state() == ModelArgumentInfo::MEMORY) {
- const Memory* memory = mMemories[p.locationAndLength().poolIndex];
+ const RuntimeMemory* memory = mMemories[p.locationAndLength().poolIndex];
if (!memory->getValidator().validateInputDimensions(p.dimensions())) {
return ANEURALNETWORKS_OP_FAILED;
}
@@ -1015,7 +1013,7 @@
std::vector<OutputShape> outputShapes(mOutputs.size());
std::transform(mOutputs.begin(), mOutputs.end(), outputShapes.begin(),
[](const auto& x) -> OutputShape {
- hidl_vec<uint32_t> dimensions;
+ std::vector<uint32_t> dimensions;
if (x.state() != ModelArgumentInfo::HAS_NO_VALUE) {
dimensions = x.dimensions();
}
@@ -1067,7 +1065,7 @@
bool ExecutionBuilder::updateMemories() {
for (const auto& output : mOutputs) {
if (output.state() != ModelArgumentInfo::MEMORY) continue;
- const Memory* memory = mMemories[output.locationAndLength().poolIndex];
+ const RuntimeMemory* memory = mMemories[output.locationAndLength().poolIndex];
NN_RET_CHECK(memory->getValidator().updateMetadata({.dimensions = output.dimensions()}));
}
return true;
@@ -1084,7 +1082,7 @@
bool success = status == ErrorStatus::NONE;
for (const auto& output : mOutputs) {
if (output.state() != ModelArgumentInfo::MEMORY) continue;
- const Memory* memory = mMemories[output.locationAndLength().poolIndex];
+ const RuntimeMemory* memory = mMemories[output.locationAndLength().poolIndex];
memory->getValidator().setInitialized(success);
}
switch (convertErrorStatusToResultCode(status)) {
@@ -1124,7 +1122,7 @@
if (VLOG_IS_ON(EXECUTION)) {
for (const auto& shape : from) {
- VLOG(EXECUTION) << "updateOutputShapes: " << toString(shape);
+ VLOG(EXECUTION) << "updateOutputShapes: " << shape;
}
}
@@ -1233,8 +1231,8 @@
StepExecutor::StepExecutor(ExecutionBuilder* executionBuilder, const ModelBuilder* model,
std::shared_ptr<Device> device,
- std::shared_ptr<PreparedModel> preparedModel, const ExecutionStep* step,
- DynamicTemporaries* dynamicTemporaries)
+ std::shared_ptr<RuntimePreparedModel> preparedModel,
+ const ExecutionStep* step, DynamicTemporaries* dynamicTemporaries)
: mExecutionBuilder(executionBuilder),
mExecutionStep(step),
mDynamicTemporaries(dynamicTemporaries),
@@ -1261,7 +1259,7 @@
void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
ModelArgumentInfo* executorInputOrOutput,
- const hidl_vec<uint32_t>* builderDimensions) {
+ const Dimensions* builderDimensions) {
auto updateDimensions = [executorInputOrOutput, builderDimensions] {
if (!builderDimensions) {
return;
@@ -1283,7 +1281,7 @@
case ModelArgumentInfo::MEMORY: {
updateDimensions();
const uint32_t builderPoolIndex = builderInputOrOutput.locationAndLength().poolIndex;
- const Memory* memory = mExecutionBuilder->mMemories[builderPoolIndex];
+ const RuntimeMemory* memory = mExecutionBuilder->mMemories[builderPoolIndex];
const uint32_t executorPoolIndex = mMemories.add(memory);
executorInputOrOutput->locationAndLength().poolIndex = executorPoolIndex;
break;
@@ -1292,8 +1290,8 @@
}
int StepExecutor::setInputOrOutputFromMemory(const Operand& inputOrOutputOperand,
- const Memory* memory, uint32_t offset,
- const hal::hidl_vec<uint32_t>& dimensions,
+ const RuntimeMemory* memory, uint32_t offset,
+ const Dimensions& dimensions,
std::optional<uint32_t> length,
ModelArgumentInfo* inputOrOutputInfo) {
// Should be similar to
@@ -1361,12 +1359,6 @@
return mDevice == DeviceManager::getCpuDevice();
}
-static OptionalTimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) {
- OptionalTimeoutDuration otd;
- otd.nanoseconds(nanoseconds);
- return otd;
-}
-
std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::compute(
const std::optional<Deadline>& deadline,
const std::shared_ptr<ExecutionBurstController>& burstController) {
@@ -1374,7 +1366,7 @@
}
std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeWithMemories(
- const std::optional<Deadline>& deadline, const std::vector<const Memory*>& memories,
+ const std::optional<Deadline>& deadline, const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController) {
CHECK(mPreparedModel != nullptr);
@@ -1393,7 +1385,7 @@
return {n, std::move(outputShapes), timing};
}
-std::tuple<int, int, sp<hal::IFencedExecutionCallback>> StepExecutor::computeFenced(
+std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>> StepExecutor::computeFenced(
const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence,
const std::optional<Deadline>& deadline) {
CHECK(mPreparedModel != nullptr);
@@ -1408,7 +1400,7 @@
makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration());
OptionalTimeoutDuration optionalTimeoutDurationAfterFence;
if (timeoutDurationAfterFence > 0) {
- optionalTimeoutDurationAfterFence.nanoseconds(timeoutDurationAfterFence);
+ optionalTimeoutDurationAfterFence = makeTimeoutDuration(timeoutDurationAfterFence);
}
const auto [n, syncFence, computeFencedCallback, timing] = mPreparedModel->executeFenced(
mInputs, mOutputs, mMemories.getObjects(), waitFor, measure, deadline,
@@ -1425,24 +1417,24 @@
VLOG(EXECUTION) << "Re-compile the model on CPU";
mDevice = DeviceManager::getCpuDevice();
mPreparedModel = nullptr;
- const ModelFactory makeModel = [this] { return mModel->makeHidlModel(); };
+ const ModelFactory makeModel = [this] { return mModel->makeModel(); };
// TODO: Propagate user preference and compilation priority to this point instead of using
// default values of ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER and
// ANEURALNETWORKS_PRIORITY_MEDIUM
const ExecutionPreference preference =
static_cast<ExecutionPreference>(ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER);
- const Priority priority = convertToHalPriority(ANEURALNETWORKS_PRIORITY_DEFAULT);
+ const Priority priority = convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_DEFAULT);
auto [n, preparedModel] = mDevice->prepareModel(makeModel, preference, priority, {}, {}, {});
mPreparedModel = std::move(preparedModel);
if (n != ANEURALNETWORKS_NO_ERROR) {
- return {n, {}, kNoTiming};
+ return {n, {}, {}};
}
// Prepare device memories for CPU fallback.
- std::vector<const Memory*> memories = mMemories.getObjects();
+ std::vector<const RuntimeMemory*> memories = mMemories.getObjects();
std::vector<bool> isUsedAsInput(memories.size(), false);
std::vector<bool> isUsedAsOutput(memories.size(), false);
- std::vector<std::unique_ptr<Memory>> blobAhwbs;
+ std::vector<std::unique_ptr<RuntimeMemory>> blobAhwbs;
// Mark the input and output usages.
for (auto& input : mInputs) {
@@ -1458,7 +1450,7 @@
if (mMemories[poolIndex]->getValidator().createdWithUnknownShape()) {
LOG(ERROR) << "Cannot fallback to CPU because at least one of the output operands "
"has unknown shape.";
- return {ANEURALNETWORKS_OP_FAILED, {}, kNoTiming};
+ return {ANEURALNETWORKS_OP_FAILED, {}, {}};
}
isUsedAsOutput[poolIndex] = true;
}
@@ -1466,17 +1458,17 @@
// Allocate BLOB mode AHardwareBuffers and read the data from input device memories.
for (uint32_t i = 0; i < memories.size(); i++) {
- const Memory* memory = mMemories[i];
+ const RuntimeMemory* memory = mMemories[i];
if (memory->getIBuffer() != nullptr) {
const uint32_t size = memory->getValidator().getMetadata().logicalSize;
auto [nAhwb, blobAhwb] = MemoryRuntimeAHWB::create(size);
if (nAhwb != ANEURALNETWORKS_NO_ERROR) {
- return {nAhwb, {}, kNoTiming};
+ return {nAhwb, {}, {}};
}
if (isUsedAsInput[i]) {
n = copyIBufferToHidlMemory(memory->getIBuffer(), blobAhwb->getHidlMemory());
if (n != ANEURALNETWORKS_NO_ERROR) {
- return {n, {}, kNoTiming};
+ return {n, {}, {}};
}
}
memories[i] = blobAhwb.get();
@@ -1491,11 +1483,11 @@
// Write back to output device memories.
for (uint32_t i = 0; i < memories.size(); i++) {
- const Memory* memory = mMemories[i];
+ const RuntimeMemory* memory = mMemories[i];
if (memory->getIBuffer() != nullptr && isUsedAsOutput[i]) {
n = copyHidlMemoryToIBuffer(memories[i]->getHidlMemory(), memory->getIBuffer(), {});
if (n != ANEURALNETWORKS_NO_ERROR) {
- return {n, {}, kNoTiming};
+ return {n, {}, {}};
}
}
}
diff --git a/runtime/ExecutionBuilder.h b/runtime/ExecutionBuilder.h
index 2540f23..1dbfce6 100644
--- a/runtime/ExecutionBuilder.h
+++ b/runtime/ExecutionBuilder.h
@@ -43,9 +43,9 @@
class ExecutionBurstController;
class ExecutionPlan;
class ExecutionStep;
-class Memory;
class ModelBuilder;
-class PreparedModel;
+class RuntimeMemory;
+class RuntimePreparedModel;
class StepExecutor;
class ExecutionBuilder {
@@ -57,11 +57,11 @@
int setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer,
size_t length);
int setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length);
+ const RuntimeMemory* memory, size_t offset, size_t length);
int setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer,
size_t length);
int setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length);
+ const RuntimeMemory* memory, size_t offset, size_t length);
int setMeasureTiming(bool measure);
@@ -86,30 +86,29 @@
int burstCompute(BurstBuilder* burst) { return compute(nullptr, burst); }
// Initialize output dimensional information from ModelArgumentInfo.
- std::vector<hal::OutputShape> getInitialOutputShapes() const;
+ std::vector<OutputShape> getInitialOutputShapes() const;
int getOutputOperandDimensions(uint32_t index, uint32_t* dimensions);
int getOutputOperandRank(uint32_t index, uint32_t* rank);
// Handshake with lower-level execution support
bool measureTiming() const { return mMeasureTiming; }
- void reportTimingWithoutFencedExecutionCallback(hal::Timing timing) {
+ void reportTimingWithoutFencedExecutionCallback(Timing timing) {
mTimingWithoutFencedExecutionCallback = timing;
}
const CompilationBuilder* getCompilation() const { return mCompilation; }
const ModelBuilder* getModel() const { return mModel; }
const ModelBuilder* getSourceModel(uint32_t index) const;
- const hal::Operand& getSourceOperand(
- const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
+ const Operand& getSourceOperand(const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
return getSourceModel(sourceOperandIndex.first)->getOperand(sourceOperandIndex.second);
}
- hal::ErrorStatus finishWithoutSyncFence(hal::ErrorStatus error,
- const std::vector<hal::OutputShape>& outputShapes);
+ ErrorStatus finishWithoutSyncFence(ErrorStatus error,
+ const std::vector<OutputShape>& outputShapes);
// Retrieve a reference to the IFencedExecutionCallback callback.
- const sp<hal::IFencedExecutionCallback>& getFencedExecutionCallback() {
+ const sp<V1_3::IFencedExecutionCallback>& getFencedExecutionCallback() {
return mFencedExecutionCallback;
}
@@ -136,8 +135,7 @@
const CompilationBuilder* mCompilation;
// Update output dimensional information from OutputShape to ModelArgumentInfo.
- bool updateOutputShapes(hal::ErrorStatus status,
- const std::vector<hal::OutputShape>& outputShapes);
+ bool updateOutputShapes(ErrorStatus status, const std::vector<OutputShape>& outputShapes);
bool updateMemories();
@@ -153,7 +151,7 @@
// The information we'll send to the driver about the inputs and outputs.
// Note that we build this in two steps:
// 1. As the arguments are specified, set the corresponding mInputs or mOutputs element.
- // If set from a pointer, don't set the location in the RequestArgument but store it
+ // If set from a pointer, don't set the location in the Request::Argument but store it
// instead in mInputBuffers or mOutputBuffers.
// 2. Once we have all the inputs and outputs, if needed, allocate shared memory for
// the m*Buffers entries. Copy the input values into the shared memory.
@@ -169,7 +167,7 @@
// Timing reported from the driver. This field is only used if
// mFencedExecutionCallback is nullptr.
- hal::Timing mTimingWithoutFencedExecutionCallback = {};
+ Timing mTimingWithoutFencedExecutionCallback = {};
// Amount of time to complete or abort the execution.
std::optional<uint64_t> mTimeoutDuration;
@@ -207,7 +205,7 @@
// doesn't support fenced execution (e.g., the driver is too old), or if the
// launch of execution on the driver fails, then this callback will be
// nullptr.
- sp<hal::IFencedExecutionCallback> mFencedExecutionCallback;
+ sp<V1_3::IFencedExecutionCallback> mFencedExecutionCallback;
};
// class StepExecutor is used to execute a single "step" in a
@@ -236,7 +234,8 @@
// of "step" models. Must be nullptr otherwise.
// (step == nullptr) == (dynamicTemporaries == nullptr)
StepExecutor(ExecutionBuilder* executionBuilder, const ModelBuilder* model,
- std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
+ std::shared_ptr<Device> device,
+ std::shared_ptr<RuntimePreparedModel> preparedModel,
const ExecutionStep* step = nullptr,
DynamicTemporaries* dynamicTemporaries = nullptr);
@@ -255,8 +254,8 @@
bool zeroSizedInput; // is at least one output of this execution step a zero-sized tensor
// that needs to be read by some other step of the same execution?
};
- bool updateOutputShapes(int executionResultCode, const std::vector<hal::OutputShape>& from,
- std::vector<hal::OutputShape>* to, UpdateOutputShapes* update);
+ bool updateOutputShapes(int executionResultCode, const std::vector<OutputShape>& from,
+ std::vector<OutputShape>* to, UpdateOutputShapes* update);
// Map inputs and outputs from ExecutionBuilder to StepExecutor,
// one at a time. Note that these are input/output indexes, not
@@ -271,7 +270,7 @@
mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mOutputs[executorIndex]);
}
void mapOutputToInput(uint32_t builderIndex, uint32_t executorIndex,
- const hal::hidl_vec<uint32_t>* outputDimensions) {
+ const Dimensions* outputDimensions) {
mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mInputs[executorIndex],
outputDimensions);
}
@@ -282,33 +281,33 @@
// (i.e., either rank must match, or operand rank must be zero; and for each
// individual dimension, either dimension must match, or operand dimension
// must be zero).
- int setInputFromMemory(uint32_t inputIndex, const Memory* memory, uint32_t offset,
- const hal::hidl_vec<uint32_t>& dimensions = {},
+ int setInputFromMemory(uint32_t inputIndex, const RuntimeMemory* memory, uint32_t offset,
+ const Dimensions& dimensions = {},
std::optional<uint32_t> length = std::nullopt) {
return setInputOrOutputFromMemory(mModel->getInputOperand(inputIndex), memory, offset,
dimensions, length, &mInputs.at(inputIndex));
}
- int setOutputFromMemory(uint32_t outputIndex, const Memory* memory, uint32_t offset,
- const hal::hidl_vec<uint32_t>& dimensions = {},
+ int setOutputFromMemory(uint32_t outputIndex, const RuntimeMemory* memory, uint32_t offset,
+ const Dimensions& dimensions = {},
std::optional<uint32_t> length = std::nullopt) {
return setInputOrOutputFromMemory(mModel->getOutputOperand(outputIndex), memory, offset,
dimensions, length, &mOutputs.at(outputIndex));
}
// Executes using the (driver, preparedModel) specified at construction time.
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> compute(
+ std::tuple<int, std::vector<OutputShape>, Timing> compute(
const std::optional<Deadline>& deadline,
const std::shared_ptr<ExecutionBurstController>& burstController = nullptr);
// Re-compiles and executes using the CPU, regardless of the (driver,
// preparedModel) specified at construction time.
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> computeOnCpuFallback();
+ std::tuple<int, std::vector<OutputShape>, Timing> computeOnCpuFallback();
bool isCpu() const;
// Perform fenced execution and return error_code, sync_fence_fd and a
// callback.
- std::tuple<int, int, sp<hal::IFencedExecutionCallback>> computeFenced(
+ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>> computeFenced(
const std::vector<int>& wait_for, uint64_t timeoutDurationAfterFence,
const std::optional<Deadline>& deadline);
@@ -321,7 +320,7 @@
// specified dimensions.
void mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
ModelArgumentInfo* executorInputOrOutput,
- const hal::hidl_vec<uint32_t>* builderDimensions = nullptr);
+ const Dimensions* builderDimensions = nullptr);
// If no length is provided, the input or output is assumed to have the length
// of the corresponding operand. dimensions must either have zero rank or
@@ -329,13 +328,14 @@
// dimensions (i.e., either rank must match, or operand rank must be zero;
// and for each individual dimension, either dimension must match, or
// operand dimension must be zero).
- int setInputOrOutputFromMemory(const hal::Operand& inputOrOutputOperand, const Memory* memory,
- uint32_t offset, const hal::hidl_vec<uint32_t>& dimensions,
+ int setInputOrOutputFromMemory(const Operand& inputOrOutputOperand, const RuntimeMemory* memory,
+ uint32_t offset, const Dimensions& dimensions,
std::optional<uint32_t> length,
ModelArgumentInfo* inputOrOutputInfo);
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> computeWithMemories(
- const std::optional<Deadline>& deadline, const std::vector<const Memory*>& memories,
+ std::tuple<int, std::vector<OutputShape>, Timing> computeWithMemories(
+ const std::optional<Deadline>& deadline,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController = nullptr);
// describes the full (possibly multiple-"step") execution
@@ -351,12 +351,12 @@
// compiled forms; and device on which to execute it
const ModelBuilder* mModel;
std::shared_ptr<Device> mDevice;
- std::shared_ptr<PreparedModel> mPreparedModel;
+ std::shared_ptr<RuntimePreparedModel> mPreparedModel;
// The information we'll send to the driver about the inputs and outputs.
// Note that we build this in two steps:
// 1. As the arguments are specified, set the corresponding mInputs or mOutputs element.
- // If set from a pointer, don't set the location in the RequestArgument but store it
+ // If set from a pointer, don't set the location in the Request::Argument but store it
// instead in mInputBuffers or mOutputBuffers.
// 2. Once we have all the inputs and outputs, if needed, allocate shared memory for
// the m*Buffers entries. Copy the input values into the shared memory.
diff --git a/runtime/ExecutionPlan.cpp b/runtime/ExecutionPlan.cpp
index c3aa61f..ba69116 100644
--- a/runtime/ExecutionPlan.cpp
+++ b/runtime/ExecutionPlan.cpp
@@ -58,8 +58,6 @@
namespace {
-using namespace hal;
-
// The index of the main model in SourceModels.
constexpr uint32_t kMainModelInSourceModels = 0;
@@ -71,7 +69,7 @@
int compile(const Device& device, const ModelBuilder& model, int executionPreference,
int compilationPriority, const std::optional<Deadline>& deadline,
const std::string& cacheDir, TokenHasher* token,
- std::shared_ptr<PreparedModel>* preparedModel) {
+ std::shared_ptr<RuntimePreparedModel>* preparedModel) {
CHECK(token != nullptr);
CHECK(preparedModel != nullptr);
*preparedModel = nullptr;
@@ -82,12 +80,14 @@
token->updateFromString(device.getVersionString().c_str()) &&
token->update(&executionPreference, sizeof(executionPreference)) &&
token->update(&compilationPriority, sizeof(compilationPriority)) && token->finish()) {
- cacheToken.emplace(token->getCacheToken());
+ cacheToken = CacheToken{};
+ const uint8_t* tokenPtr = token->getCacheToken();
+ std::copy(tokenPtr, tokenPtr + cacheToken->size(), cacheToken->begin());
}
- const ModelFactory makeModel = [&model] { return model.makeHidlModel(); };
+ const ModelFactory makeModel = [&model] { return model.makeModel(); };
const ExecutionPreference preference = static_cast<ExecutionPreference>(executionPreference);
- const Priority priority = convertToHalPriority(compilationPriority);
+ const Priority priority = convertToCanonicalPriority(compilationPriority);
const auto [n, returnedPreparedModel] =
device.prepareModel(makeModel, preference, priority, deadline, cacheDir, cacheToken);
*preparedModel = returnedPreparedModel;
@@ -99,27 +99,24 @@
int copyOperandExtraParams(ModelBuilder& model, uint32_t toOperandIndex,
const Operand& fromOperand) {
if (fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
- fromOperand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::channelQuant) {
- auto& fromChannelQuant = fromOperand.extraParams.channelQuant();
+ std::holds_alternative<Operand::SymmPerChannelQuantParams>(fromOperand.extraParams)) {
+ auto& fromChannelQuant =
+ std::get<Operand::SymmPerChannelQuantParams>(fromOperand.extraParams);
ANeuralNetworksSymmPerChannelQuantParams toChannelQuant = {
.channelDim = fromChannelQuant.channelDim,
.scaleCount = static_cast<uint32_t>(fromChannelQuant.scales.size()),
.scales = fromChannelQuant.scales.data(),
};
return model.setOperandSymmPerChannelQuantParams(toOperandIndex, toChannelQuant);
- } else if (isExtensionOperandType(fromOperand.type) &&
- fromOperand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::extension) {
- hidl_vec<uint8_t> extensionData = fromOperand.extraParams.extension();
+ } else if (isExtension(fromOperand.type) &&
+ std::holds_alternative<Operand::ExtensionParams>(fromOperand.extraParams)) {
+ auto extensionData = std::get<Operand::ExtensionParams>(fromOperand.extraParams);
return model.setOperandExtensionData(toOperandIndex, extensionData.data(),
extensionData.size());
- } else if (fromOperand.extraParams.getDiscriminator() !=
- OperandExtraParams::hidl_discriminator::none ||
+ } else if (!std::holds_alternative<Operand::NoParams>(fromOperand.extraParams) ||
fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- LOG(ERROR) << "Type " << toString(fromOperand.type)
- << " has an unexpected extraParams discriminator: "
- << static_cast<int>(fromOperand.extraParams.getDiscriminator());
+ LOG(ERROR) << "Type " << fromOperand.type
+ << " has an unexpected extraParams variant: " << fromOperand.extraParams.index();
return ANEURALNETWORKS_BAD_DATA;
} else {
return ANEURALNETWORKS_NO_ERROR;
@@ -153,8 +150,8 @@
uint32_t count = 0;
for (uint32_t operandIndex : operation.inputs) {
auto lifetime = mModel->getOperand(operandIndex).lifetime;
- if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
count++;
mOperandToOperations.emplace(operandIndex, operationIndex);
}
@@ -193,21 +190,6 @@
std::to_string(sourceOperandIndex.second) + ")";
};
-std::string toString(hidl_vec<uint32_t> dimensions) {
- std::string ret = "(";
- bool wroteOne = false;
- for (uint32_t dimension : dimensions) {
- if (wroteOne) {
- ret += ", ";
- } else {
- wroteOne = true;
- }
- ret += std::to_string(dimension);
- }
- ret += ")";
- return ret;
-};
-
} // namespace
void DynamicTemporaries::vlogDump(const char* context) const {
@@ -227,8 +209,7 @@
}
void DynamicTemporaries::declare(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex,
- const hidl_vec<uint32_t>& initialDimensions,
- uint32_t initialLength) {
+ const Dimensions& initialDimensions, uint32_t initialLength) {
VLOG(EXECUTION) << "DynamicTemporaries::declare(sourceOperandIndex = "
<< toString(sourceOperandIndex) << ", stepIndex = " << stepIndex
<< ", initialDimensions = " << toString(initialDimensions)
@@ -243,7 +224,7 @@
}
bool DynamicTemporaries::redeclare(SourceOperandIndex sourceOperandIndex,
- const hidl_vec<uint32_t>& newDimensions, uint32_t newLength) {
+ const Dimensions& newDimensions, uint32_t newLength) {
auto createAndLogResult = [sourceOperandIndex, &newDimensions, newLength](bool changedShape) {
VLOG(EXECUTION) << "DynamicTemporaries::redeclare(sourceOperandIndex = "
<< toString(sourceOperandIndex)
@@ -389,31 +370,19 @@
// Sets its value.
switch (operand.lifetime) {
- case OperandLifeTime::CONSTANT_COPY: {
+ case Operand::LifeTime::CONSTANT_COPY: {
const uint8_t* data = sourceModel.getPointerToOperandValue(operand.location.offset);
n = mStepModel.setOperandValue(*stepOperandIndex, data, operand.location.length);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- case OperandLifeTime::CONSTANT_REFERENCE: {
- const Memory* memory = sourceModel.getMemories()[operand.location.poolIndex];
+ case Operand::LifeTime::CONSTANT_REFERENCE: {
+ const RuntimeMemory* memory = sourceModel.getMemories()[operand.location.poolIndex];
n = mStepModel.setOperandValueFromMemory(
*stepOperandIndex, memory, operand.location.offset, operand.location.length);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- case OperandLifeTime::NO_VALUE: {
+ case Operand::LifeTime::NO_VALUE: {
n = mStepModel.setOperandValue(*stepOperandIndex, nullptr, 0);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- case OperandLifeTime::TEMPORARY_VARIABLE: { // handled similarly to SUBGRAPH_OUTPUT
+ case Operand::LifeTime::TEMPORARY_VARIABLE: { // handled similarly to SUBGRAPH_OUTPUT
if (kind == INPUT) {
// The first time we've seen this operand is as an
// input. That means it must be defined by a
@@ -427,10 +396,10 @@
mIndex);
}
} break;
- case OperandLifeTime::SUBGRAPH_INPUT: {
+ case Operand::LifeTime::SUBGRAPH_INPUT: {
mModelInputs.emplace_back(sourceOperandIndex, *stepOperandIndex);
} break;
- case OperandLifeTime::SUBGRAPH_OUTPUT: { // handled similarly to TEMPORARY_VARIABLE
+ case Operand::LifeTime::SUBGRAPH_OUTPUT: { // handled similarly to TEMPORARY_VARIABLE
if (kind == INPUT) {
// The first time we've seen this operand is as an
// input. That means it must be defined by a
@@ -446,20 +415,20 @@
mIndex);
}
} break;
- case OperandLifeTime::SUBGRAPH: {
+ case Operand::LifeTime::SUBGRAPH: {
const ModelBuilder* model = sourceModel.getReferencedModel(operand);
n = mStepModel.setOperandValueFromModel(*stepOperandIndex, model);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- default: {
- CHECK(!"unexpected");
+ case Operand::LifeTime::POINTER: {
+ const void* data = std::get<const void*>(operand.location.pointer);
+ n = mStepModel.setOperandValue(*stepOperandIndex, data, operand.location.length);
} break;
}
- return ANEURALNETWORKS_NO_ERROR;
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ LOG(ERROR) << "Previous error occurred when partitioning the graph";
+ }
+ return n;
}
int ExecutionStep::addOperation(int operationIndex) {
@@ -477,7 +446,7 @@
// constant, or an operand written by a different partition.
//
// - We should not have seen any outputs.
- auto addOperands = [this](const hidl_vec<uint32_t>& sourceModelOperands,
+ auto addOperands = [this](const std::vector<uint32_t>& sourceModelOperands,
std::vector<uint32_t>* stepModelOperands, OperandKind kind) -> int {
const uint32_t operandCount = static_cast<uint32_t>(sourceModelOperands.size());
for (uint32_t i = 0; i < operandCount; i++) {
@@ -498,7 +467,7 @@
void ExecutionStep::mapInputsAndOutputs(
std::shared_ptr<StepExecutor> executor,
- const std::vector<hal::OutputShape>* mainModelOutputShapes, const Memory* temporaryMemory,
+ const std::vector<OutputShape>* mainModelOutputShapes, const RuntimeMemory* temporaryMemory,
const std::map<SourceOperandIndex, uint32_t>& sourceOperandToOffsetOfTemporary,
const DynamicTemporaries& dynamicTemporaries,
const std::map<SourceOperandIndex, uint32_t>& sourceOperandToInputIndex,
@@ -674,10 +643,10 @@
}
static bool hasUnknownSize(const Operand& operand) {
- if (operand.dimensions.size() == 0) {
+ if (operand.dimensions.empty()) {
return TypeManager::get()->isTensorType(operand.type);
}
- for (uint32_t dimension : operand.dimensions) {
+ for (const Dimension& dimension : operand.dimensions) {
if (dimension == 0) {
return true;
}
@@ -693,8 +662,8 @@
const Operand& operand = mStepModel.getOperand(stepModelOutput.second);
if (hasUnknownSize(operand)) {
*hasOutputOfUnknownSize = true;
- VLOG(COMPILATION) << "StepModelOutput (operand#" << toString(stepModelOutput.first)
- << " of source graph) has unknown size: " << toString(operand);
+ VLOG(COMPILATION) << "StepModelOutput (operand#" << stepModelOutput.first
+ << " of source graph) has unknown size: " << operand;
}
}
@@ -779,38 +748,32 @@
void ExecutionStep::dump() const {
if (VLOG_IS_ON(COMPILATION)) {
VLOG(COMPILATION) << "Step#" << mIndex << ": execute on " << mDevice->getName();
- logModelToInfo(mStepModel.makeHidlModel());
+ logModelToInfo(mStepModel.makeModel());
}
}
-std::string toString(const IfStep& step) {
- std::ostringstream oss;
- oss << "Step#" << step.index << ": if " << toString(step.conditionOperandIndex)
- << " then=" << step.thenStepIndex << " else=" << step.elseStepIndex;
- return oss.str();
+std::ostream& operator<<(std::ostream& os, const IfStep& step) {
+ return os << "Step#" << step.index << ": if " << toString(step.conditionOperandIndex)
+ << " then=" << step.thenStepIndex << " else=" << step.elseStepIndex;
}
-std::string toString(const WhileStep& step) {
- std::ostringstream oss;
- oss << "Step#" << step.index << ": while cond=" << step.condStepIndex
- << " body=" << step.bodyStepIndex << " exit=" << step.exitStepIndex;
- return oss.str();
+std::ostream& operator<<(std::ostream& os, const WhileStep& step) {
+ return os << "Step#" << step.index << ": while cond=" << step.condStepIndex
+ << " body=" << step.bodyStepIndex << " exit=" << step.exitStepIndex;
}
-std::string toString(const GotoStep& step) {
- std::ostringstream oss;
- oss << "Step#" << step.index << ": goto " << step.gotoStepIndex;
- return oss.str();
+std::ostream& operator<<(std::ostream& os, const GotoStep& step) {
+ return os << "Step#" << step.index << ": goto " << step.gotoStepIndex;
}
void LogicalStep::dump() const {
if (VLOG_IS_ON(COMPILATION)) {
if (const IfStep* step = tryIfStep()) {
- VLOG(COMPILATION) << toString(*step);
+ VLOG(COMPILATION) << *step;
} else if (const WhileStep* step = tryWhileStep()) {
- VLOG(COMPILATION) << toString(*step);
+ VLOG(COMPILATION) << *step;
} else if (const GotoStep* step = tryGotoStep()) {
- VLOG(COMPILATION) << toString(*step);
+ VLOG(COMPILATION) << *step;
} else {
executionStep()->dump();
}
@@ -897,12 +860,17 @@
const ModelBuilder* sourceModel = sourceModels->getModel(sourceOperandIndex.first);
const Operand& operand = sourceModel->getOperand(sourceOperandIndex.second);
const DataLocation& location = operand.location;
- if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
mSourceOperandToBoundaryConstantCopy[sourceOperandIndex] = {
.buffer = sourceModel->getPointerToOperandValue(location.offset),
.length = location.length,
};
- } else if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ } else if (operand.lifetime == Operand::LifeTime::POINTER) {
+ mSourceOperandToBoundaryConstantCopy[sourceOperandIndex] = {
+ .buffer = static_cast<const uint8_t*>(std::get<const void*>(location.pointer)),
+ .length = location.length,
+ };
+ } else if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE) {
mSourceOperandToBoundaryConstantReference[sourceOperandIndex] = {
.memory = sourceModel->getMemories()[location.poolIndex],
.offset = location.offset,
@@ -1043,7 +1011,7 @@
if (mState == SIMPLE) {
return std::shared_ptr<Controller>(new Controller(this, executionBuilder, burstBuilder));
}
- // Create the layout for a Memory object big enough to hold
+ // Create the layout for a RuntimeMemory object big enough to hold
// - every partition boundary TEMPORARY operand that is not a dynamic temporary, and
// - buffers required by the control flow implementation.
//
@@ -1078,17 +1046,17 @@
[executionBuilder, &totalSizeOfTemporaries](
const SourceOperandIndex& sourceOperandIndex,
std::map<SourceOperandIndex, uint32_t>* sourceOperandToOffsetOfTemporary,
- OperandLifeTime lifetime = OperandLifeTime::TEMPORARY_VARIABLE) {
- CHECK(lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- lifetime == OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime lifetime = Operand::LifeTime::TEMPORARY_VARIABLE) {
+ CHECK(lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT);
const Operand& sourceOperand =
executionBuilder->getSourceOperand(sourceOperandIndex);
- if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE &&
- sourceOperand.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE &&
+ sourceOperand.lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
// See the caller for explanation.
return;
}
- CHECK(sourceOperand.lifetime == lifetime);
+ CHECK_EQ(sourceOperand.lifetime, lifetime);
const uint32_t size = TypeManager::get()->getSizeOfData(sourceOperand);
if (size != 0u) {
const uint32_t offset = addTemporaryOfSize(&totalSizeOfTemporaries, size);
@@ -1100,8 +1068,8 @@
} else {
// Unknown size, hence dynamic temporary. The mapping will
// be established elsewhere (DynamicTemporaries::allocate()).
- CHECK(lifetime == OperandLifeTime::TEMPORARY_VARIABLE);
- CHECK(sourceOperand.lifetime == OperandLifeTime::TEMPORARY_VARIABLE);
+ CHECK_EQ(lifetime, Operand::LifeTime::TEMPORARY_VARIABLE);
+ CHECK_EQ(sourceOperand.lifetime, Operand::LifeTime::TEMPORARY_VARIABLE);
}
};
std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary;
@@ -1170,15 +1138,15 @@
// so (b/148206073).
for (const auto& sourceOperandIndex : step->bodyOutputOperands) {
mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary,
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime::SUBGRAPH_OUTPUT);
// Allocate another set of temporaries for double buffering.
mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary2,
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime::SUBGRAPH_OUTPUT);
}
// Allocate memory for condition model output.
// TODO: Share one condition output memory region between all loops.
mapTemporary(step->condOutputOperand, &sourceOperandToOffsetOfTemporary,
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime::SUBGRAPH_OUTPUT);
} else {
CHECK(logicalStep->isGoto());
}
@@ -1245,7 +1213,7 @@
}
ExecutionPlan::Buffer::Buffer(void* pointer, uint32_t size)
- : mInfo(RunTimePoolInfo::createFromExistingBuffer(reinterpret_cast<uint8_t*>(pointer), size)),
+ : mInfo(RunTimePoolInfo::createFromExistingBuffer(static_cast<uint8_t*>(pointer), size)),
mOffset(0) {}
ExecutionPlan::Buffer::Buffer(RunTimePoolInfo info, uint32_t offset)
@@ -1515,7 +1483,7 @@
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
const std::vector<OutputShape>* mainModelOutputShapes) const {
- VLOG(EXECUTION) << "next: " << toString(*step);
+ VLOG(EXECUTION) << "next: " << *step;
// If the last step has a sync fence, wait for it to signal before reading the condition value.
// This is safe because the steps are serialized when doing fenced compute.
NN_RETURN_IF_ERROR(controller->waitForLastStepSyncFence());
@@ -1558,7 +1526,7 @@
WhileState& state = controller->mWhileState[controller->mNextStepIndex];
if (state.stage == WhileState::EVALUATE_CONDITION) {
state.iteration = state.iteration == WhileState::kOutsideLoop ? 0 : state.iteration + 1;
- VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration
+ VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration
<< ": evaluating condition";
controller->mNextStepIndex = step->condStepIndex;
@@ -1602,7 +1570,7 @@
bool condValue;
NN_RETURN_IF_ERROR(readConditionValue(controller, step->condOutputOperand, &condValue));
if (condValue) {
- VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration
+ VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration
<< ": evaluating body";
controller->mNextStepIndex = step->bodyStepIndex;
@@ -1632,7 +1600,7 @@
}
}
} else {
- VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration
+ VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration
<< ": exiting loop";
controller->mNextStepIndex = step->exitStepIndex;
@@ -1677,7 +1645,7 @@
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
const std::vector<OutputShape>* mainModelOutputShapes) const {
- VLOG(EXECUTION) << "next: " << toString(*step);
+ VLOG(EXECUTION) << "next: " << *step;
controller->mNextStepIndex = step->gotoStepIndex;
return nextCompound(controller, executor, burstController, mainModelOutputShapes);
}
@@ -1905,7 +1873,7 @@
int n = plan->finish(preference, priority, deadline, simulateFailureResultCode);
if (VLOG_IS_ON(COMPILATION)) {
VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: source model: ";
- logModelToInfo(makeHidlModel());
+ logModelToInfo(makeModel());
plan->dump();
}
return n;
@@ -2148,7 +2116,7 @@
bodyModelIndex, bodyModel->getOutputOperandIndex(i));
}
} else {
- CHECK(false) << toString(operation.type) << " is not a control flow operation";
+ CHECK(false) << operation.type << " is not a control flow operation";
}
tracker.markProcessed(operationIndex, enqueueOnAppropriateDevice);
}
@@ -2176,7 +2144,7 @@
float ModelBuilder::getPerformance(uint32_t preference, const std::shared_ptr<Device> device,
uint32_t operationIndex) const {
- auto applyPreference = [preference](const PerformanceInfo& perf) {
+ auto applyPreference = [preference](const Capabilities::PerformanceInfo& perf) {
return preference == ANEURALNETWORKS_PREFER_LOW_POWER ? perf.powerUsage : perf.execTime;
};
@@ -2300,7 +2268,7 @@
int ModelBuilder::findBestDeviceForEachOperation(
uint32_t preference, const std::vector<std::shared_ptr<Device>>& devices,
std::vector<int>* bestDeviceForOperation) const {
- const MetaModel metaModel(makeHidlModel(), DeviceManager::get()->strictSlicing());
+ const MetaModel metaModel(makeModel(), DeviceManager::get()->strictSlicing());
const size_t deviceCount = devices.size();
std::vector<CanDo> canDo(deviceCount);
@@ -2345,13 +2313,13 @@
// Logs O(operationCount * deviceCount) times, but typically deviceCount is
// very small.
VLOG(COMPILATION) << "Device " << device->getName() << " can't do operation "
- << toString(operation.type);
+ << operation.type;
}
}
}
if (bestChoice < 0) {
- LOG(ERROR) << "No driver can do operation " << toString(operation.type);
+ LOG(ERROR) << "No driver can do operation " << operation.type;
return ANEURALNETWORKS_BAD_DATA;
} else if (devices[bestChoice] == DeviceManager::getCpuDevice() &&
supportedByControlFlowInterpreter(operationIndex)) {
@@ -2359,15 +2327,13 @@
// to delegate referenced models.
const int kControlFlowInterpreter = deviceCount;
(*bestDeviceForOperation)[operationIndex] = kControlFlowInterpreter;
- VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation("
- << toString(operation.type) << ":" << operationIndex << ") = -1"
- << " (NNAPI)";
+ VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" << operation.type
+ << ":" << operationIndex << ") = -1 (NNAPI)";
} else {
(*bestDeviceForOperation)[operationIndex] = bestChoice;
- VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation("
- << toString(operation.type) << ":" << operationIndex
- << ") = " << bestChoice << " (" << devices[bestChoice]->getName()
- << ")";
+ VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" << operation.type
+ << ":" << operationIndex << ") = " << bestChoice << " ("
+ << devices[bestChoice]->getName() << ")";
}
}
return ANEURALNETWORKS_NO_ERROR;
diff --git a/runtime/ExecutionPlan.h b/runtime/ExecutionPlan.h
index 740912d..097fbd6 100644
--- a/runtime/ExecutionPlan.h
+++ b/runtime/ExecutionPlan.h
@@ -20,6 +20,7 @@
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_PLAN_H
#include <android-base/logging.h>
+#include <nnapi/Types.h>
#include <openssl/sha.h>
#include <algorithm>
@@ -35,7 +36,6 @@
#include <variant>
#include <vector>
-#include "HalInterfaces.h"
#include "Memory.h"
#include "ModelArgumentInfo.h"
#include "ModelBuilder.h"
@@ -52,8 +52,8 @@
class ExecutionBuilder;
class ExecutionBurstController;
class ExecutionPlan;
-class Memory;
-class PreparedModel;
+class RuntimeMemory;
+class RuntimePreparedModel;
class StepExecutor;
struct ConstantReferenceLocation;
@@ -142,7 +142,7 @@
// operand). initialDimensions and initialLength indicate what we know or
// (in the case of length) guess about those properties.
void declare(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex,
- const hal::hidl_vec<uint32_t>& initialDimensions, uint32_t initialLength);
+ const Dimensions& initialDimensions, uint32_t initialLength);
// Indicate that we've finished declaring all dynamic temporaries.
void endDeclarations() {
@@ -153,8 +153,8 @@
// Redeclare a dynamic temporary, indicating what we've learned about it.
// This may invalidate the location of temporaries defined by its step.
// Returns true if dimensions or length changed, false otherwise.
- bool redeclare(SourceOperandIndex sourceOperandIndex,
- const hal::hidl_vec<uint32_t>& newDimensions, uint32_t newLength);
+ bool redeclare(SourceOperandIndex sourceOperandIndex, const Dimensions& newDimensions,
+ uint32_t newLength);
// Ensure that all dynamic temporaries defined by the specified step have
// locations. The return value is a ResultCode (e.g.,
@@ -180,9 +180,9 @@
// - If mustBeAllocated == true, then trigger a failed CHECK().
// - If mustBeAllocated == false, then memory == nullptr and offset == ~0.
struct LocationAndShape {
- const Memory* memory;
+ const RuntimeMemory* memory;
uint32_t offset;
- const hal::hidl_vec<uint32_t>* dimensions;
+ const Dimensions* dimensions;
uint32_t length;
};
std::optional<LocationAndShape> lookup(SourceOperandIndex sourceOperandIndex,
@@ -197,7 +197,7 @@
struct InternalLocationAndShape {
uint32_t stepIndex;
uint32_t offset;
- hal::hidl_vec<uint32_t> dimensions;
+ Dimensions dimensions;
uint32_t length;
};
std::map<SourceOperandIndex, InternalLocationAndShape> mSourceOperandToTemporary;
@@ -267,7 +267,9 @@
std::shared_ptr<Device> getDevice() const { return mDevice; }
// only available after calling finishStepModel()
- std::shared_ptr<PreparedModel> getPreparedStepModel() const { return mPreparedStepModel; }
+ std::shared_ptr<RuntimePreparedModel> getPreparedStepModel() const {
+ return mPreparedStepModel;
+ }
// Map inputs and outputs from ExecutionBuilder to StepExecutor.
//
@@ -278,8 +280,8 @@
// inputs of this step are of fully specified shape.
void mapInputsAndOutputs(
std::shared_ptr<StepExecutor> stepExecutor,
- const std::vector<hal::OutputShape>* mainModelOutputShapes,
- const Memory* temporaryMemory, // for static temporaries
+ const std::vector<OutputShape>* mainModelOutputShapes,
+ const RuntimeMemory* temporaryMemory, // for static temporaries
const std::map<SourceOperandIndex, uint32_t>&
sourceOperandToOffsetOfTemporary, // for static temporaries
const DynamicTemporaries& dynamicTemporaries,
@@ -306,7 +308,7 @@
uint32_t mSourceModelIndex;
ModelBuilder mStepModel; // An excerpt of a source model to be run by one device.
std::shared_ptr<Device> mDevice;
- std::shared_ptr<PreparedModel> mPreparedStepModel;
+ std::shared_ptr<RuntimePreparedModel> mPreparedStepModel;
// All inputs of this step model:
// (source model operand index, step model operand index)
@@ -510,9 +512,9 @@
std::variant<ExecutionStep, IfStep, WhileStep, GotoStep> mStep;
};
-std::string toString(const IfStep& step);
-std::string toString(const WhileStep& step);
-std::string toString(const GotoStep& step);
+std::ostream& operator<<(std::ostream& os, const IfStep& step);
+std::ostream& operator<<(std::ostream& os, const WhileStep& step);
+std::ostream& operator<<(std::ostream& os, const GotoStep& step);
// Describes the state of WhileStep.
struct WhileState {
@@ -533,7 +535,7 @@
};
struct ConstantReferenceLocation {
- const Memory* memory;
+ const RuntimeMemory* memory;
uint32_t offset;
uint32_t length;
};
@@ -660,13 +662,13 @@
// syncFdOfLastStep is the sync fence fd generated by the most recently processed step.
int next(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes,
+ const std::vector<OutputShape>* mainModelOutputShapes,
int syncFdOfLastStep = -1) const;
// Create the same executor as the last one created by next().
int fallback(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
ExecutionStep* createNewExecutionStep(uint32_t sourceModelIndex,
const std::shared_ptr<Device> device);
@@ -737,8 +739,7 @@
// Illegal to call for when mState == SIMPLE.
void becomeCompoundIfEmpty();
- const hal::Operand& getSourceOperand(
- const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
+ const Operand& getSourceOperand(const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
return getSourceModels()
.getModel(sourceOperandIndex.first)
->getOperand(sourceOperandIndex.second);
@@ -770,23 +771,23 @@
int nextCompound(std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const ExecutionStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const IfStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const WhileStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const GotoStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
struct Body {
virtual ~Body() {}
@@ -818,7 +819,7 @@
std::shared_ptr<Device> mDevice;
const ModelBuilder* mModel;
- std::shared_ptr<PreparedModel> mPreparedModel;
+ std::shared_ptr<RuntimePreparedModel> mPreparedModel;
const std::string* mCacheDir;
TokenHasher mToken;
@@ -862,7 +863,8 @@
// to initialize ExecutionPlan::Controller::mSourceOperandToOutputIndex;
std::map<SourceOperandIndex, uint32_t> mSourceOperandToOutputIndex;
- // Map from source operand index to location of a CONSTANT_COPY operand.
+ // Map from source operand index to location of a CONSTANT_COPY or
+ // POINTER operand.
// This map only contains constant partition boundary IF and WHILE
// operands and is used to create a ExecutionPlan::Controller.
std::map<SourceOperandIndex, ConstantCopyLocation> mSourceOperandToBoundaryConstantCopy;
@@ -887,9 +889,9 @@
// values with the corresponding SUBGRAPH_INPUT operands in a referenced
// model.
//
- // For CONSTANT_COPY boundary operands, we copy those to temporary
- // memory and treat them similarly to TEMPORARY_VARIABLE operands in
- // Controller.
+ // For CONSTANT_COPY and POINTER boundary operands, we copy those to
+ // temporary memory and treat them similarly to TEMPORARY_VARIABLE
+ // operands in Controller.
//
// For CONSTANT_REFERENCE boundary operands, we keep track of them in
// ExecutionPlan::Controller::mSourceOperandToConstantReference.
@@ -923,7 +925,7 @@
return static_cast<const CompoundBody*>(mBody);
}
- void forEachDynamicTemporary(const std::function<void(SourceOperandIndex, const hal::Operand&,
+ void forEachDynamicTemporary(const std::function<void(SourceOperandIndex, const Operand&,
uint32_t definingStepIndex)>&) const;
// Pointers to compilation caching information in CompilationBuilder.
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index 78d7c36..90d58e4 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -47,17 +47,13 @@
namespace android {
namespace nn {
-using namespace hal;
-
-const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-
// A Device with actual underlying driver
class DriverDevice : public Device {
public:
// Create a DriverDevice from a name and a DeviceFactory function.
// Returns nullptr on failure.
static std::shared_ptr<DriverDevice> create(const std::string& name,
- const DeviceFactory& makeDevice);
+ const HalDeviceFactory& makeDevice);
// Prefer using DriverDevice::create
DriverDevice(std::shared_ptr<VersionedIDevice> device);
@@ -70,25 +66,20 @@
return kInterface->getSupportedExtensions();
}
std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override;
- PerformanceInfo getPerformance(OperandType type) const override {
- const auto& capabilities = kInterface->getCapabilities();
- return lookup(capabilities.operandPerformance, type);
+ Capabilities::PerformanceInfo getPerformance(OperandType type) const override {
+ return kInterface->getCapabilities().operandPerformance.lookup(type);
}
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.relaxedFloat32toFloat16PerformanceScalar;
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
+ return kInterface->getCapabilities().relaxedFloat32toFloat16PerformanceScalar;
}
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.relaxedFloat32toFloat16PerformanceTensor;
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
+ return kInterface->getCapabilities().relaxedFloat32toFloat16PerformanceTensor;
}
- PerformanceInfo getIfPerformance() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.ifPerformance;
+ Capabilities::PerformanceInfo getIfPerformance() const override {
+ return kInterface->getCapabilities().ifPerformance;
}
- PerformanceInfo getWhilePerformance() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.whilePerformance;
+ Capabilities::PerformanceInfo getWhilePerformance() const override {
+ return kInterface->getCapabilities().whilePerformance;
}
bool isCachingSupported() const override {
// Caching is supported if either of numModelCache or numDataCache is greater than 0.
@@ -98,13 +89,13 @@
}
int wait() const override { return kInterface->wait(); }
- std::pair<int, std::shared_ptr<PreparedModel>> prepareModel(
+ std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
const std::optional<CacheToken>& maybeToken) const override;
- std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc,
- hal::OperandType) const override;
+ std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
+ OperandType) const override;
private:
const std::shared_ptr<VersionedIDevice> kInterface;
@@ -117,8 +108,8 @@
#endif // NN_DEBUGGABLE
};
-// A PreparedModel with underlying IPreparedModel instance return by actual driver.
-class DriverPreparedModel : public PreparedModel {
+// A RuntimePreparedModel with underlying IPreparedModel instance return by actual driver.
+class DriverPreparedModel : public RuntimePreparedModel {
public:
DriverPreparedModel(const Device* device,
const std::shared_ptr<VersionedIPreparedModel>& preparedModel)
@@ -134,18 +125,18 @@
std::tuple<int, std::vector<OutputShape>, Timing> execute(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const override;
- std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
+ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& waitFor,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
std::shared_ptr<ExecutionBurstController> configureExecutionBurst(
bool preferPowerOverLatency) const override {
@@ -169,7 +160,7 @@
}
std::shared_ptr<DriverDevice> DriverDevice::create(const std::string& name,
- const DeviceFactory& makeDevice) {
+ const HalDeviceFactory& makeDevice) {
CHECK(makeDevice != nullptr);
std::shared_ptr<VersionedIDevice> device = VersionedIDevice::create(name, makeDevice);
if (device == nullptr) {
@@ -187,10 +178,10 @@
std::vector<bool> supportedOperations;
std::tie(status, supportedOperations) = kInterface->getSupportedOperations(metaModel);
- const Model& hidlModel = metaModel.getModel();
- const uint32_t operationCount = hidlModel.main.operations.size();
+ const Model& model = metaModel.getModel();
+ const uint32_t operationCount = model.main.operations.size();
if (status != ErrorStatus::NONE) {
- LOG(ERROR) << "IDevice::getSupportedOperations returned the error " << toString(status);
+ LOG(ERROR) << "IDevice::getSupportedOperations returned the error " << status;
// Set the supported operation vectors to all false, so we won't use this driver.
return std::vector<bool>(operationCount, false);
}
@@ -213,17 +204,18 @@
}
uint32_t accumulator = baseAccumulator;
- const Operation& operation = hidlModel.main.operations[operationIndex];
+ const Operation& operation = model.main.operations[operationIndex];
accumulator ^= static_cast<uint32_t>(operation.type);
- auto accumulateOperands = [&hidlModel, &accumulator](const hidl_vec<uint32_t>& operands) {
+ auto accumulateOperands = [&model, &accumulator](const std::vector<uint32_t>& operands) {
for (uint32_t operandIndex : operands) {
- const Operand& operand = hidlModel.main.operands[operandIndex];
+ const Operand& operand = model.main.operands[operandIndex];
accumulator ^= static_cast<uint32_t>(operand.type);
accumulator ^= operand.dimensions.size();
- for (uint32_t dimension : operand.dimensions) {
+ for (const Dimension& dimension : operand.dimensions) {
accumulator ^= dimension;
- if (operand.lifetime == OperandLifeTime::CONSTANT_COPY ||
- operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY ||
+ operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE ||
+ operand.lifetime == Operand::LifeTime::POINTER) {
accumulator ^= 1;
}
}
@@ -240,7 +232,7 @@
return supportedOperations;
}
-std::pair<int, std::shared_ptr<PreparedModel>> DriverDevice::prepareModel(
+std::pair<int, std::shared_ptr<RuntimePreparedModel>> DriverDevice::prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
const std::optional<CacheToken>& maybeToken) const {
@@ -253,9 +245,9 @@
return {ANEURALNETWORKS_NO_ERROR, std::make_shared<DriverPreparedModel>(this, preparedModel)};
}
-std::pair<int, std::unique_ptr<Memory>> DriverDevice::allocate(const MemoryDescriptor& desc,
- hal::OperandType) const {
- const BufferDesc hidlDesc = {.dimensions = desc.dimensions};
+std::pair<int, std::unique_ptr<RuntimeMemory>> DriverDevice::allocate(const MemoryDescriptor& desc,
+ OperandType) const {
+ const V1_3::BufferDesc hidlDesc = {.dimensions = desc.dimensions};
std::vector<std::shared_ptr<VersionedIPreparedModel>> preparedModels(
desc.preparedModels.size());
std::transform(desc.preparedModels.begin(), desc.preparedModels.end(), preparedModels.begin(),
@@ -266,7 +258,7 @@
});
auto [status, buffer, token] =
kInterface->allocate(hidlDesc, preparedModels, desc.inputRoles, desc.outputRoles);
- if (status != ErrorStatus::NONE) {
+ if (status != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "DriverDevice::allocate -- memory allocation on device " << getName()
<< " failed!";
return {convertErrorStatusToResultCode(status), nullptr};
@@ -279,7 +271,7 @@
// input a bit.
static std::tuple<int, std::unique_ptr<MemoryAshmem>, std::vector<DataLocation>>
allocatePointerArgumentsToPool(const std::vector<ModelArgumentInfo>& args,
- std::vector<const Memory*>* memories) {
+ std::vector<const RuntimeMemory*>* memories) {
CHECK(memories != nullptr);
std::vector<DataLocation> ptrArgsLocations;
const uint32_t nextPoolIndex = memories->size();
@@ -321,14 +313,14 @@
// DeviceManager::mSyncExecHal.
std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute(
const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::execute");
// Make a copy of the memory tracker as we will append memory pools for pointer arguments.
- std::vector<const Memory*> localMemories = memories;
+ std::vector<const RuntimeMemory*> localMemories = memories;
// We separate the input & output pools so accelerators only need to copy
// the contents of the input pools. We could also use it to set protection
@@ -338,12 +330,12 @@
const auto [n1, inputPtrArgsMemory, inputPtrArgsLocations] =
allocatePointerArgumentsToPool(inputs, &localMemories);
if (n1 != ANEURALNETWORKS_NO_ERROR) {
- return {n1, {}, kNoTiming};
+ return {n1, {}, {}};
}
const auto [n2, outputPtrArgsMemory, outputPtrArgsLocations] =
allocatePointerArgumentsToPool(outputs, &localMemories);
if (n2 != ANEURALNETWORKS_NO_ERROR) {
- return {n2, {}, kNoTiming};
+ return {n2, {}, {}};
}
// Copy the input data that was specified via a pointer.
@@ -364,7 +356,7 @@
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
- request.pools[i] = localMemories[i]->getMemoryPool();
+ request.pools[i] = uncheckedConvert(localMemories[i]->getMemoryPool());
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
@@ -372,26 +364,30 @@
int n = ANEURALNETWORKS_OP_FAILED;
std::vector<OutputShape> outputShapes;
- Timing timing = kNoTiming;
+ Timing timing;
// compute using burst if present
const bool burstCompute = (burstController != nullptr);
bool burstFallback = true;
if (burstCompute) {
- const bool compliant = compliantWithV1_2(request);
+ const bool compliant = compliantWithV1_2(convertToV1_3(request));
if (compliant) {
- V1_0::Request request12 = convertToV1_2(request);
+ V1_0::Request request12 = convertToV1_2(convertToV1_3(request));
std::vector<intptr_t> memoryIds;
memoryIds.reserve(localMemories.size());
- for (const Memory* memory : localMemories) {
+ for (const RuntimeMemory* memory : localMemories) {
memory->usedBy(burstController);
memoryIds.push_back(memory->getKey());
}
VLOG(EXECUTION) << "Before ExecutionBurstController->compute() "
<< SHOW_IF_DEBUG(toString(request12));
- std::tie(n, outputShapes, timing, burstFallback) =
- burstController->compute(request12, measure, memoryIds);
+ std::vector<V1_2::OutputShape> halOutputShapes;
+ V1_2::Timing halTiming;
+ std::tie(n, halOutputShapes, halTiming, burstFallback) =
+ burstController->compute(request12, convertToV1_2(measure), memoryIds);
+ outputShapes = uncheckedConvert(halOutputShapes);
+ timing = uncheckedConvert(halTiming);
}
}
@@ -426,19 +422,18 @@
return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing};
}
-std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing>
-DriverPreparedModel::executeFenced(
+std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> DriverPreparedModel::executeFenced(
const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& waitFor,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const {
NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::executeFenced");
CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd > 0; }));
// Make a copy of the memory tracker as we will append memory pools for pointer arguments.
- std::vector<const Memory*> localMemories = memories;
- sp<hal::IFencedExecutionCallback> executeFencedCallback;
- hal::Timing timing = kNoTiming;
+ std::vector<const RuntimeMemory*> localMemories = memories;
+ sp<V1_3::IFencedExecutionCallback> executeFencedCallback;
+ Timing timing;
// We separate the input & output pools so accelerators only need to copy
// the contents of the input pools. We could also use it to set protection
@@ -474,14 +469,14 @@
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
- request.pools[i] = localMemories[i]->getMemoryPool();
+ request.pools[i] = uncheckedConvert(localMemories[i]->getMemoryPool());
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
"DriverPreparedModel::executeFenced");
int n = ANEURALNETWORKS_OP_FAILED;
- hidl_vec<hidl_handle> waitForHandles;
+ hardware::hidl_vec<hardware::hidl_handle> waitForHandles;
waitForHandles.resize(waitFor.size());
for (uint32_t i = 0; i < waitFor.size(); i++) {
native_handle_t* nativeHandle = native_handle_create(1, 0);
@@ -495,12 +490,12 @@
return {n, -1, nullptr, timing};
}
nativeHandle->data[0] = dupFd;
- hidl_handle hidlHandle;
+ hardware::hidl_handle hidlHandle;
hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
waitForHandles[i] = std::move(hidlHandle);
}
- hidl_handle syncFence;
+ hardware::hidl_handle syncFence;
std::tie(n, syncFence, executeFencedCallback, timing) =
mPreparedModel->executeFenced(request, waitForHandles, measure, deadline,
loopTimeoutDuration, timeoutDurationAfterFence);
@@ -561,25 +556,27 @@
return kSupportedExtensions;
}
std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override;
- PerformanceInfo getPerformance(OperandType) const override { return kPerformance; }
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
+ Capabilities::PerformanceInfo getPerformance(OperandType) const override {
return kPerformance;
}
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
return kPerformance;
}
- PerformanceInfo getIfPerformance() const override { return kPerformance; }
- PerformanceInfo getWhilePerformance() const override { return kPerformance; }
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
+ return kPerformance;
+ }
+ Capabilities::PerformanceInfo getIfPerformance() const override { return kPerformance; }
+ Capabilities::PerformanceInfo getWhilePerformance() const override { return kPerformance; }
bool isCachingSupported() const override { return false; }
int wait() const override { return ANEURALNETWORKS_NO_ERROR; }
- std::pair<int, std::shared_ptr<PreparedModel>> prepareModel(
+ std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
const std::optional<CacheToken>& maybeToken) const override;
- std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc,
- OperandType type) const override;
+ std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
+ OperandType type) const override;
private:
CpuDevice() = default;
@@ -588,17 +585,17 @@
const std::string kVersionString = build::GetBuildNumber();
// Since the performance is a ratio compared to the CPU performance,
// by definition the performance of the CPU is 1.0.
- const PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f};
+ const Capabilities::PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f};
const std::vector<Extension> kSupportedExtensions{/* No extensions. */};
};
-// A special abstracted PreparedModel for the CPU, constructed by CpuDevice.
-class CpuPreparedModel : public PreparedModel {
+// A special abstracted RuntimePreparedModel for the CPU, constructed by CpuDevice.
+class CpuPreparedModel : public RuntimePreparedModel {
public:
// Factory method for CpuPreparedModel. Returns ANEURALNETWORKS_NO_ERROR and
// a prepared model object if successfully created. Returns an error code
// and nullptr otherwise.
- static std::pair<int, std::shared_ptr<PreparedModel>> create(Model hidlModel);
+ static std::pair<int, std::shared_ptr<RuntimePreparedModel>> create(Model model);
const Device* getDevice() const override { return CpuDevice::get().get(); }
std::shared_ptr<VersionedIPreparedModel> getInterface() const override { return nullptr; }
@@ -606,7 +603,7 @@
std::tuple<int, std::vector<OutputShape>, Timing> execute(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const override;
@@ -616,13 +613,13 @@
return nullptr;
}
- std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
+ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& wait_for,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& wait_for,
MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
// Prefer to use CpuPreparedModel::create.
CpuPreparedModel(Model model, std::vector<RunTimePoolInfo> poolInfos)
@@ -634,21 +631,20 @@
};
std::vector<bool> CpuDevice::getSupportedOperations(const MetaModel& metaModel) const {
- const Model& hidlModel = metaModel.getModel();
- const size_t count = hidlModel.main.operations.size();
+ const Model& model = metaModel.getModel();
+ const size_t count = model.main.operations.size();
std::vector<bool> result(count, false);
for (size_t i = 0; i < count; i++) {
// TODO(b/119870033): Decide whether and how post-P operations would be supported on CPU.
// We may want to use the slicer for CpuDevice just as we do for
// DriverDevice.
- OperationType operationType = hidlModel.main.operations[i].type;
- result[i] = !isExtensionOperationType(operationType) &&
- operationType != OperationType::OEM_OPERATION;
+ OperationType operationType = model.main.operations[i].type;
+ result[i] = !isExtension(operationType) && operationType != OperationType::OEM_OPERATION;
}
return result;
}
-std::pair<int, std::shared_ptr<PreparedModel>> CpuDevice::prepareModel(
+std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuDevice::prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& /*cacheDir*/,
const std::optional<CacheToken>& maybeToken) const {
@@ -656,8 +652,9 @@
<< "Should never call prepareModel with cache information on CpuDevice";
const Model model = makeModel();
- if (!validateModel(model, ValidationMode::RUNTIME) ||
- !validateExecutionPreference(preference) || !validatePriority(priority)) {
+ if (!validateModel(convertToV1_3(model), ValidationMode::RUNTIME) ||
+ !validateExecutionPreference(convertToV1_1(preference)) ||
+ !validatePriority(convertToV1_3(priority))) {
return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
if (hasDeadlinePassed(deadline)) {
@@ -667,8 +664,8 @@
return CpuPreparedModel::create(model);
}
-std::pair<int, std::unique_ptr<Memory>> CpuDevice::allocate(const MemoryDescriptor& desc,
- OperandType type) const {
+std::pair<int, std::unique_ptr<RuntimeMemory>> CpuDevice::allocate(const MemoryDescriptor& desc,
+ OperandType type) const {
uint32_t size = TypeManager::get()->getSizeOfData(type, desc.dimensions);
if (size == 0) {
LOG(ERROR) << "CpuDevice::allocate -- does not support unknown dimensions.";
@@ -677,14 +674,14 @@
return MemoryAshmem::create(size);
}
-std::pair<int, std::shared_ptr<PreparedModel>> CpuPreparedModel::create(Model hidlModel) {
+std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuPreparedModel::create(Model model) {
std::vector<RunTimePoolInfo> poolInfos;
- if (!setRunTimePoolInfosFromHidlMemories(&poolInfos, hidlModel.pools)) {
+ if (!setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) {
return {ANEURALNETWORKS_UNMAPPABLE, nullptr};
}
- std::shared_ptr<PreparedModel> preparedModel =
- std::make_shared<CpuPreparedModel>(std::move(hidlModel), std::move(poolInfos));
+ std::shared_ptr<RuntimePreparedModel> preparedModel =
+ std::make_shared<CpuPreparedModel>(std::move(model), std::move(poolInfos));
return {ANEURALNETWORKS_NO_ERROR, std::move(preparedModel)};
}
@@ -696,26 +693,23 @@
const OptionalTimeoutDuration& loopTimeoutDuration) {
NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "computeOnCpu");
CpuExecutor executor;
- if (loopTimeoutDuration.getDiscriminator() !=
- OptionalTimeoutDuration::hidl_discriminator::none) {
- executor.setLoopTimeout(loopTimeoutDuration.nanoseconds());
+ if (loopTimeoutDuration.has_value()) {
+ executor.setLoopTimeout(loopTimeoutDuration->count());
}
if (deadline.has_value()) {
executor.setDeadline(*deadline);
}
int err = executor.run(model, request, modelPoolInfos, requestPoolInfos);
const auto& outputShapes = executor.getOutputShapes();
- return {err, outputShapes, kNoTiming};
+ return {err, outputShapes, {}};
}
-std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing>
-CpuPreparedModel::executeFenced(const std::vector<ModelArgumentInfo>& inputs,
- const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
- const std::vector<int>& waitFor, hal::MeasureTiming measure,
- const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& duration) const {
+std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> CpuPreparedModel::executeFenced(
+ const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& duration) const {
VLOG(EXECUTION)
<< "CpuPreparedModel::executeFenced wait for sync fences to signal before execution";
for (int syncFd : waitFor) {
@@ -730,8 +724,8 @@
// Update deadline if the timeout duration is closer than the deadline.
auto closestDeadline = deadline;
- if (duration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none) {
- const auto timeoutDurationDeadline = makeDeadline(duration.nanoseconds());
+ if (duration.has_value()) {
+ const auto timeoutDurationDeadline = makeDeadline(*duration);
if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) {
closestDeadline = timeoutDurationDeadline;
}
@@ -751,21 +745,21 @@
// Will choose between sync/async execution according to DeviceManager::mSyncExecCpu.
std::tuple<int, std::vector<OutputShape>, Timing> CpuPreparedModel::execute(
const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& /*burstController*/,
MeasureTiming /*measure*/, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
if (hasDeadlinePassed(deadline)) {
- return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, kNoTiming};
+ return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, {}};
}
std::vector<RunTimePoolInfo> requestPoolInfos;
requestPoolInfos.reserve(memories.size());
- for (const Memory* mem : memories) {
+ for (const RuntimeMemory* mem : memories) {
if (std::optional<RunTimePoolInfo> poolInfo = mem->getRunTimePoolInfo()) {
requestPoolInfos.emplace_back(*poolInfo);
} else {
- return {ANEURALNETWORKS_UNMAPPABLE, {}, kNoTiming};
+ return {ANEURALNETWORKS_UNMAPPABLE, {}, {}};
}
}
// Create as many pools as there are input / output.
@@ -818,7 +812,7 @@
std::shared_ptr<Device> DeviceManager::forTest_makeDriverDevice(const std::string& name,
const sp<V1_0::IDevice>& device) {
- const DeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
+ const HalDeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
const auto driverDevice = DriverDevice::create(name, makeDevice);
CHECK(driverDevice != nullptr);
return driverDevice;
@@ -831,7 +825,7 @@
const auto names = hardware::getAllHalInstanceNames(V1_0::IDevice::descriptor);
for (const auto& name : names) {
VLOG(MANAGER) << "Found interface " << name;
- const DeviceFactory makeDevice = [name](bool blocking) {
+ const HalDeviceFactory makeDevice = [name](bool blocking) {
return blocking ? V1_0::IDevice::getService(name) : V1_0::IDevice::tryGetService(name);
};
registerDevice(name, makeDevice);
@@ -842,7 +836,7 @@
mDevicesCpuOnly.push_back(CpuDevice::get());
}
-void DeviceManager::registerDevice(const std::string& name, const DeviceFactory& makeDevice) {
+void DeviceManager::registerDevice(const std::string& name, const HalDeviceFactory& makeDevice) {
if (auto device = DriverDevice::create(name, makeDevice)) {
mDevices.push_back(std::move(device));
}
diff --git a/runtime/Manager.h b/runtime/Manager.h
index d6d4835..3e3ce03 100644
--- a/runtime/Manager.h
+++ b/runtime/Manager.h
@@ -43,40 +43,42 @@
class VersionedIPreparedModel;
// A unified interface for actual driver prepared model as well as the CPU.
-class PreparedModel {
- DISALLOW_COPY_AND_ASSIGN(PreparedModel);
+class RuntimePreparedModel {
+ DISALLOW_COPY_AND_ASSIGN(RuntimePreparedModel);
public:
- PreparedModel() = default;
- virtual ~PreparedModel() = default;
+ RuntimePreparedModel() = default;
+ virtual ~RuntimePreparedModel() = default;
virtual const Device* getDevice() const = 0;
virtual std::shared_ptr<VersionedIPreparedModel> getInterface() const = 0;
// Perform computation with given input/output argument info and memory pools.
- virtual std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> execute(
+ virtual std::tuple<int, std::vector<OutputShape>, Timing> execute(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
- const std::shared_ptr<ExecutionBurstController>& burstController,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration) const = 0;
+ const std::vector<const RuntimeMemory*>& memories,
+ const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
+ const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration) const = 0;
// Perform fenced computation with given input/output argument info and memory pools.
// The returned timing information is only valid if the callback is nullptr.
// Returns error_code, sync_fence, callback and timing.
- virtual std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
+ virtual std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& waitFor,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const = 0;
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const = 0;
virtual std::shared_ptr<ExecutionBurstController> configureExecutionBurst(
bool preferPowerOverLatency) const = 0;
};
+using ModelFactory = std::function<Model()>;
+
// A unified interface for actual driver devices as well as the CPU
class Device {
DISALLOW_COPY_AND_ASSIGN(Device);
@@ -90,29 +92,28 @@
virtual const std::string& getVersionString() const = 0;
virtual int64_t getFeatureLevel() const = 0;
virtual int32_t getType() const = 0;
- virtual const std::vector<hal::Extension>& getSupportedExtensions() const = 0;
+ virtual const std::vector<Extension>& getSupportedExtensions() const = 0;
// See the MetaModel class in MetaModel.h for more details.
virtual std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const = 0;
- virtual hal::PerformanceInfo getPerformance(hal::OperandType type) const = 0;
- virtual hal::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const = 0;
- virtual hal::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const = 0;
- virtual hal::PerformanceInfo getIfPerformance() const = 0;
- virtual hal::PerformanceInfo getWhilePerformance() const = 0;
+ virtual Capabilities::PerformanceInfo getPerformance(OperandType type) const = 0;
+ virtual Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const = 0;
+ virtual Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const = 0;
+ virtual Capabilities::PerformanceInfo getIfPerformance() const = 0;
+ virtual Capabilities::PerformanceInfo getWhilePerformance() const = 0;
virtual bool isCachingSupported() const = 0;
virtual int wait() const = 0;
- virtual std::pair<int, std::shared_ptr<PreparedModel>> prepareModel(
- const hal::ModelFactory& makeModel, hal::ExecutionPreference preference,
- hal::Priority priority, const std::optional<Deadline>& deadline,
- const std::string& cacheDir,
- const std::optional<hal::CacheToken>& maybeToken) const = 0;
+ virtual std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
+ const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
+ const std::optional<Deadline>& deadline, const std::string& cacheDir,
+ const std::optional<CacheToken>& maybeToken) const = 0;
- // The caller is responsible for making sure the MemoryDescriptor only contains PreparedModels
- // from the same Device.
- virtual std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc,
- hal::OperandType type) const = 0;
+ // The caller is responsible for making sure the MemoryDescriptor only contains
+ // PreparedModels from the same Device.
+ virtual std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
+ OperandType type) const = 0;
};
// Manages the NN HAL devices. Only one instance of this class will exist.
@@ -168,8 +169,8 @@
}
// Register a test device.
- void forTest_registerDevice(const std::string& name, const sp<hal::V1_0::IDevice>& device) {
- const hal::DeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
+ void forTest_registerDevice(const std::string& name, const sp<V1_0::IDevice>& device) {
+ const HalDeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
registerDevice(name, makeDevice);
}
@@ -182,7 +183,7 @@
// Make a test device
static std::shared_ptr<Device> forTest_makeDriverDevice(const std::string& name,
- const sp<hal::V1_0::IDevice>& device);
+ const sp<V1_0::IDevice>& device);
bool forTest_isCpuDevice(const ANeuralNetworksDevice* device) const {
return reinterpret_cast<const Device*>(device) == getCpuDevice().get();
@@ -193,7 +194,7 @@
DeviceManager();
// Adds a device for the manager to use.
- void registerDevice(const std::string& name, const hal::DeviceFactory& makeDevice);
+ void registerDevice(const std::string& name, const HalDeviceFactory& makeDevice);
void findAvailableDevices();
diff --git a/runtime/Memory.cpp b/runtime/Memory.cpp
index ee9faf9..7efaf64 100644
--- a/runtime/Memory.cpp
+++ b/runtime/Memory.cpp
@@ -30,6 +30,8 @@
#include <utility>
#include <vector>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
#include "CompilationBuilder.h"
#include "CpuExecutor.h"
#include "ExecutionBurstController.h"
@@ -41,7 +43,7 @@
namespace android {
namespace nn {
-using namespace hal;
+using ::android::hidl::memory::V1_0::IMemory;
namespace {
@@ -183,17 +185,18 @@
} // namespace
-Memory::Memory(hal::hidl_memory memory)
+RuntimeMemory::RuntimeMemory(hardware::hidl_memory memory)
: kHidlMemory(std::move(memory)),
mValidator(std::make_unique<SizedMemoryValidator>(kHidlMemory.size())) {}
-Memory::Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+RuntimeMemory::RuntimeMemory(hardware::hidl_memory memory,
+ std::unique_ptr<MemoryValidatorBase> validator)
: kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {}
-Memory::Memory(sp<hal::IBuffer> buffer, uint32_t token)
+RuntimeMemory::RuntimeMemory(sp<V1_3::IBuffer> buffer, uint32_t token)
: kBuffer(std::move(buffer)), kToken(token) {}
-Memory::~Memory() {
+RuntimeMemory::~RuntimeMemory() {
for (const auto& [ptr, weakBurst] : mUsedBy) {
if (const std::shared_ptr<ExecutionBurstController> burst = weakBurst.lock()) {
burst->freeMemory(getKey());
@@ -201,8 +204,8 @@
}
}
-Request::MemoryPool Memory::getMemoryPool() const {
- Request::MemoryPool pool;
+V1_3::Request::MemoryPool RuntimeMemory::getMemoryPool() const {
+ V1_3::Request::MemoryPool pool;
if (kToken > 0) {
pool.token(kToken);
} else {
@@ -211,20 +214,20 @@
return pool;
}
-std::optional<RunTimePoolInfo> Memory::getRunTimePoolInfo() const {
+std::optional<RunTimePoolInfo> RuntimeMemory::getRunTimePoolInfo() const {
std::lock_guard<std::mutex> guard(mMutex);
if (!mHasCachedRunTimePoolInfo) {
- mCachedRunTimePoolInfo = RunTimePoolInfo::createFromHidlMemory(kHidlMemory);
+ mCachedRunTimePoolInfo = RunTimePoolInfo::createFromMemory(uncheckedConvert(kHidlMemory));
mHasCachedRunTimePoolInfo = true;
}
return mCachedRunTimePoolInfo;
}
-intptr_t Memory::getKey() const {
+intptr_t RuntimeMemory::getKey() const {
return reinterpret_cast<intptr_t>(this);
}
-void Memory::usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const {
+void RuntimeMemory::usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const {
std::lock_guard<std::mutex> guard(mMutex);
mUsedBy.emplace(burst.get(), burst);
}
@@ -246,37 +249,37 @@
return ANEURALNETWORKS_NO_ERROR;
}
-int copyIBufferToHidlMemory(const sp<IBuffer>& src, const hidl_memory& dst) {
+int copyIBufferToHidlMemory(const sp<V1_3::IBuffer>& src, const hardware::hidl_memory& dst) {
const auto ret = src->copyTo(dst);
if (!ret.isOk()) {
LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description();
return ANEURALNETWORKS_OP_FAILED;
}
- return convertErrorStatusToResultCode(static_cast<ErrorStatus>(ret));
+ return convertErrorStatusToResultCode(static_cast<V1_3::ErrorStatus>(ret));
}
-int copyHidlMemoryToIBuffer(const hidl_memory& src, const sp<IBuffer>& dst,
+int copyHidlMemoryToIBuffer(const hardware::hidl_memory& src, const sp<V1_3::IBuffer>& dst,
const std::vector<uint32_t>& dimensions) {
const auto ret = dst->copyFrom(src, dimensions);
if (!ret.isOk()) {
LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description();
return ANEURALNETWORKS_OP_FAILED;
}
- return convertErrorStatusToResultCode(static_cast<ErrorStatus>(ret));
+ return convertErrorStatusToResultCode(static_cast<V1_3::ErrorStatus>(ret));
}
-static int copyIBuffers(const sp<IBuffer>& src, const sp<IBuffer>& dst,
+static int copyIBuffers(const sp<V1_3::IBuffer>& src, const sp<V1_3::IBuffer>& dst,
const MemoryValidatorBase::Metadata& srcMetadata) {
const auto [n, memory] = MemoryRuntimeAHWB::create(srcMetadata.logicalSize);
NN_RETURN_IF_ERROR(n);
- const hidl_memory& hidlMemory = memory->getHidlMemory();
+ const hardware::hidl_memory& hidlMemory = memory->getHidlMemory();
if (!hidlMemory.valid()) return ANEURALNETWORKS_OUT_OF_MEMORY;
NN_RETURN_IF_ERROR(copyIBufferToHidlMemory(src, hidlMemory));
NN_RETURN_IF_ERROR(copyHidlMemoryToIBuffer(hidlMemory, dst, srcMetadata.dimensions));
return ANEURALNETWORKS_NO_ERROR;
}
-static int copyInternal(const Memory& src, const Memory& dst) {
+static int copyInternal(const RuntimeMemory& src, const RuntimeMemory& dst) {
if (&src == &dst) return ANEURALNETWORKS_NO_ERROR;
if (!src.getValidator().isInitialized()) {
@@ -307,7 +310,7 @@
return ANEURALNETWORKS_OP_FAILED;
}
-int Memory::copy(const Memory& src, const Memory& dst) {
+int RuntimeMemory::copy(const RuntimeMemory& src, const RuntimeMemory& dst) {
int n = copyInternal(src, dst);
dst.getValidator().setInitialized(n == ANEURALNETWORKS_NO_ERROR);
return n;
@@ -333,7 +336,7 @@
return ANEURALNETWORKS_BAD_DATA;
}
- std::vector<std::tuple<const PreparedModel*, IOType, uint32_t>> roles;
+ std::vector<std::tuple<const RuntimePreparedModel*, IOType, uint32_t>> roles;
auto callback = [&roles](const auto* preparedModel, IOType type, uint32_t index) {
roles.emplace_back(preparedModel, type, index);
};
@@ -421,10 +424,10 @@
static void logMemoryDescriptorToInfo(const MemoryDescriptor& desc, const Operand& operand) {
LOG(INFO) << "MemoryDescriptor start";
- LOG(INFO) << " Data type: " << toString(operand.type);
- LOG(INFO) << " Scale: " << toString(operand.scale);
- LOG(INFO) << " Zero point: " << toString(operand.zeroPoint);
- LOG(INFO) << " Extra params: " << toString(operand.extraParams);
+ LOG(INFO) << " Data type: " << operand.type;
+ LOG(INFO) << " Scale: " << operand.scale;
+ LOG(INFO) << " Zero point: " << operand.zeroPoint;
+ LOG(INFO) << " Extra params: " << operand.extraParams;
LOG(INFO) << " Dimensions: " << toString(desc.dimensions);
LOG(INFO) << " Prepared models [" << desc.preparedModels.size() << "]:";
for (const auto* preparedModel : desc.preparedModels) {
@@ -432,11 +435,11 @@
}
LOG(INFO) << " Input roles [" << desc.inputRoles.size() << "]:";
for (const auto& usage : desc.inputRoles) {
- LOG(INFO) << " " << toString(usage);
+ LOG(INFO) << " " << usage;
}
LOG(INFO) << " Output roles [" << desc.outputRoles.size() << "]:";
for (const auto& usage : desc.outputRoles) {
- LOG(INFO) << " " << toString(usage);
+ LOG(INFO) << " " << usage;
}
LOG(INFO) << "MemoryDescriptor end";
}
@@ -484,14 +487,14 @@
return ANEURALNETWORKS_NO_ERROR;
}
-std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const {
+std::pair<int, std::unique_ptr<RuntimeMemory>> MemoryBuilder::allocate() const {
if (!mFinished) {
LOG(ERROR) << "ANeuralNetworksMemory_createFromDesc -- passed an unfinished descriptor";
return {ANEURALNETWORKS_BAD_STATE, nullptr};
}
int n = ANEURALNETWORKS_OP_FAILED;
- std::unique_ptr<Memory> memory;
+ std::unique_ptr<RuntimeMemory> memory;
CHECK(mOperand.has_value());
// Try allocate the memory on device.
@@ -521,10 +524,10 @@
}
std::pair<int, std::unique_ptr<MemoryAshmem>> MemoryAshmem::create(uint32_t size) {
- hidl_memory hidlMemory = allocateSharedMemory(size);
+ hardware::hidl_memory hidlMemory = allocateSharedMemory(size);
sp<IMemory> mapped = mapMemory(hidlMemory);
if (mapped == nullptr || mapped->getPointer() == nullptr) {
- LOG(ERROR) << "Memory::create failed";
+ LOG(ERROR) << "RuntimeMemory::create failed";
return {ANEURALNETWORKS_OUT_OF_MEMORY, nullptr};
}
return {ANEURALNETWORKS_NO_ERROR,
@@ -535,8 +538,8 @@
return static_cast<uint8_t*>(static_cast<void*>(kMappedMemory->getPointer()));
}
-MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hidl_memory memory)
- : Memory(std::move(memory)), kMappedMemory(std::move(mapped)) {}
+MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hardware::hidl_memory memory)
+ : RuntimeMemory(std::move(memory)), kMappedMemory(std::move(mapped)) {}
std::pair<int, std::unique_ptr<MemoryFd>> MemoryFd::create(size_t size, int prot, int fd,
size_t offset) {
@@ -576,25 +579,26 @@
// Push the hidl_handle into a hidl_memory object. The hidl_memory object is
// responsible for cleaning the hidl_handle, the native handle, and the fd.
- hidl_memory hidlMemory = hidl_memory("mmap_fd", std::move(hidlHandle), size);
+ hardware::hidl_memory hidlMemory =
+ hardware::hidl_memory("mmap_fd", std::move(hidlHandle), size);
return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFd>(std::move(hidlMemory))};
}
-MemoryFd::MemoryFd(hidl_memory memory) : Memory(std::move(memory)) {}
+MemoryFd::MemoryFd(hardware::hidl_memory memory) : RuntimeMemory(std::move(memory)) {}
std::pair<int, std::unique_ptr<MemoryAHWB>> MemoryAHWB::create(const AHardwareBuffer& ahwb) {
AHardwareBuffer_Desc bufferDesc;
AHardwareBuffer_describe(&ahwb, &bufferDesc);
const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
- hidl_memory hidlMemory;
+ hardware::hidl_memory hidlMemory;
std::unique_ptr<MemoryValidatorBase> validator;
if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
- hidlMemory = hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
+ hidlMemory = hardware::hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
validator = std::make_unique<SizedMemoryValidator>(bufferDesc.width);
} else {
// memory size is not used.
- hidlMemory = hidl_memory("hardware_buffer", handle, 0);
+ hidlMemory = hardware::hidl_memory("hardware_buffer", handle, 0);
validator = std::make_unique<AHardwareBufferNonBlobValidator>();
}
auto memory = std::make_unique<MemoryAHWB>(std::move(hidlMemory), std::move(validator));
@@ -633,7 +637,8 @@
return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
- hidl_memory hidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
+ hardware::hidl_memory hidlMemory =
+ hardware::hidl_memory("hardware_buffer_blob", handle, desc.width);
auto memory = std::make_unique<MemoryRuntimeAHWB>(std::move(hidlMemory), ahwb,
static_cast<uint8_t*>(buffer));
allocateGuard.Disable();
@@ -641,9 +646,9 @@
return {ANEURALNETWORKS_NO_ERROR, std::move(memory)};
}
-MemoryRuntimeAHWB::MemoryRuntimeAHWB(hal::hidl_memory memory, AHardwareBuffer* ahwb,
+MemoryRuntimeAHWB::MemoryRuntimeAHWB(hardware::hidl_memory memory, AHardwareBuffer* ahwb,
uint8_t* buffer)
- : Memory(std::move(memory)), mAhwb(ahwb), mBuffer(buffer) {
+ : RuntimeMemory(std::move(memory)), mAhwb(ahwb), mBuffer(buffer) {
CHECK(mAhwb != nullptr);
CHECK(mBuffer != nullptr);
}
@@ -653,7 +658,7 @@
AHardwareBuffer_release(mAhwb);
}
-std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<hal::IBuffer> buffer,
+std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<V1_3::IBuffer> buffer,
uint32_t token) {
if (buffer == nullptr) {
LOG(ERROR) << "nullptr IBuffer for device memory.";
@@ -666,8 +671,8 @@
return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)};
};
-MemoryFromDevice::MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token)
- : Memory(std::move(buffer), token) {}
+MemoryFromDevice::MemoryFromDevice(sp<V1_3::IBuffer> buffer, uint32_t token)
+ : RuntimeMemory(std::move(buffer), token) {}
} // namespace nn
} // namespace android
diff --git a/runtime/Memory.h b/runtime/Memory.h
index 56bf81d..f78ef80 100644
--- a/runtime/Memory.h
+++ b/runtime/Memory.h
@@ -39,11 +39,13 @@
namespace android {
namespace nn {
+using ::android::hidl::memory::V1_0::IMemory;
+
class CompilationBuilder;
class Device;
class ExecutionBurstController;
class ModelBuilder;
-class PreparedModel;
+class RuntimePreparedModel;
// A utility template class to accumulate multiple objects and assign each
// a distinct index number, starting with 0.
@@ -93,12 +95,12 @@
};
using CompilationRole = std::tuple<const CompilationBuilder*, IOType, uint32_t>;
-using StepRoleCallback = std::function<void(const PreparedModel*, IOType, uint32_t)>;
+using StepRoleCallback = std::function<void(const RuntimePreparedModel*, IOType, uint32_t)>;
struct MemoryDescriptor {
std::vector<uint32_t> dimensions;
- ObjectTracker<PreparedModel> preparedModels;
- std::vector<hal::BufferRole> inputRoles, outputRoles;
+ ObjectTracker<RuntimePreparedModel> preparedModels;
+ std::vector<BufferRole> inputRoles, outputRoles;
};
class MemoryValidatorBase {
@@ -144,7 +146,7 @@
// The data type, scale, zero point, and extra parameters of the target operand.
// Other fields will be ignored, including dimensions, lifetime, location, etc.
// Set to std::nullopt if undefined.
- std::optional<hal::Operand> operand;
+ std::optional<Operand> operand;
};
virtual Metadata getMetadata() const = 0;
@@ -158,24 +160,24 @@
virtual bool isInitialized() const { return true; }
};
-int copyIBufferToHidlMemory(const sp<hal::IBuffer>& src, const hal::hidl_memory& dst);
+int copyIBufferToHidlMemory(const sp<V1_3::IBuffer>& src, const hardware::hidl_memory& dst);
-int copyHidlMemoryToIBuffer(const hal::hidl_memory& src, const sp<hal::IBuffer>& dst,
+int copyHidlMemoryToIBuffer(const hardware::hidl_memory& src, const sp<V1_3::IBuffer>& dst,
const std::vector<uint32_t>& dimensions);
// Represents a memory region.
-class Memory {
+class RuntimeMemory {
// Disallow copy and assign to prevent slicing
- DISALLOW_COPY_AND_ASSIGN(Memory);
+ DISALLOW_COPY_AND_ASSIGN(RuntimeMemory);
public:
// Custom destructor to notify any ExecutionBurstControllers currently using
// this memory that it is being freed.
- virtual ~Memory();
+ virtual ~RuntimeMemory();
- hal::Request::MemoryPool getMemoryPool() const;
- const hal::hidl_memory& getHidlMemory() const { return kHidlMemory; }
- const sp<hal::IBuffer>& getIBuffer() const { return kBuffer; }
+ V1_3::Request::MemoryPool getMemoryPool() const;
+ const hardware::hidl_memory& getHidlMemory() const { return kHidlMemory; }
+ const sp<V1_3::IBuffer>& getIBuffer() const { return kBuffer; }
virtual uint32_t getSize() const { return getHidlMemory().size(); }
virtual std::optional<RunTimePoolInfo> getRunTimePoolInfo() const;
@@ -196,24 +198,24 @@
// the bursts' memory cache.
void usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const;
- static int copy(const Memory& src, const Memory& dst);
+ static int copy(const RuntimeMemory& src, const RuntimeMemory& dst);
protected:
- Memory(hal::hidl_memory memory);
- Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
- Memory(sp<hal::IBuffer> buffer, uint32_t token);
+ RuntimeMemory(hardware::hidl_memory memory);
+ RuntimeMemory(hardware::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
+ RuntimeMemory(sp<V1_3::IBuffer> buffer, uint32_t token);
// The HIDL representation for this memory. We will use one of the following values
// when communicating with the drivers.
- const hal::hidl_memory kHidlMemory;
- const sp<hal::IBuffer> kBuffer;
+ const hardware::hidl_memory kHidlMemory;
+ const sp<V1_3::IBuffer> kBuffer;
const uint32_t kToken = 0;
std::unique_ptr<MemoryValidatorBase> mValidator;
private:
mutable std::mutex mMutex;
- // mUsedBy is essentially a set of burst objects which use this Memory
+ // mUsedBy is essentially a set of burst objects which use this RuntimeMemory
// object. However, std::weak_ptr does not have comparison operations nor a
// std::hash implementation. This is because it is either a valid pointer
// (non-null) if the shared object is still alive, or it is null if the
@@ -238,7 +240,7 @@
int finish();
- std::pair<int, std::unique_ptr<Memory>> allocate() const;
+ std::pair<int, std::unique_ptr<RuntimeMemory>> allocate() const;
private:
bool badState(const char* name) const;
@@ -253,7 +255,7 @@
// Keep track of the data type, scale, zero point, and extra parameters of the target operand.
// Other fields will be ignored, including dimensions, lifetime, location, etc.
// It is std::nullopt if no usage has been specified yet.
- std::optional<hal::Operand> mOperand;
+ std::optional<Operand> mOperand;
// Once the descriptor has been finished, we should not allow further modifications.
bool mFinished = false;
@@ -271,7 +273,7 @@
bool mShouldFallback = true;
};
-class MemoryAshmem : public Memory {
+class MemoryAshmem : public RuntimeMemory {
public:
// Creates a memory object containing a new android shared memory ("ashmem")
// object of the size specified in bytes. Because this ashmem region can be
@@ -292,13 +294,13 @@
}
// prefer using MemoryAshmem::create
- MemoryAshmem(sp<hal::IMemory> mapped, hal::hidl_memory memory);
+ MemoryAshmem(sp<IMemory> mapped, hardware::hidl_memory memory);
private:
- const sp<hal::IMemory> kMappedMemory;
+ const sp<IMemory> kMappedMemory;
};
-class MemoryFd : public Memory {
+class MemoryFd : public RuntimeMemory {
public:
// Create a memory object based on input size, prot, and fd that can be sent
// across HIDL. This function duplicates the provided fd, and owns the
@@ -310,10 +312,10 @@
size_t offset);
// prefer using MemoryFd::create
- MemoryFd(hal::hidl_memory memory);
+ MemoryFd(hardware::hidl_memory memory);
};
-class MemoryAHWB : public Memory {
+class MemoryAHWB : public RuntimeMemory {
public:
// Create a memory object to keep track of (but not take ownership of) the
// provided AHardwareBuffer handle.
@@ -323,11 +325,11 @@
static std::pair<int, std::unique_ptr<MemoryAHWB>> create(const AHardwareBuffer& ahwb);
// prefer using MemoryAHWB::create
- MemoryAHWB(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
- : Memory(std::move(memory), std::move(validator)) {}
+ MemoryAHWB(hardware::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+ : RuntimeMemory(std::move(memory), std::move(validator)) {}
};
-class MemoryRuntimeAHWB : public Memory {
+class MemoryRuntimeAHWB : public RuntimeMemory {
public:
// Create a memory object containing a new BLOB-mode AHardwareBuffer memory
// object of the size specified in bytes. The created memory is managed and
@@ -347,7 +349,7 @@
}
// prefer using MemoryRuntimeAHWB::create
- MemoryRuntimeAHWB(hal::hidl_memory memory, AHardwareBuffer* ahwb, uint8_t* buffer);
+ MemoryRuntimeAHWB(hardware::hidl_memory memory, AHardwareBuffer* ahwb, uint8_t* buffer);
~MemoryRuntimeAHWB();
private:
@@ -355,21 +357,21 @@
uint8_t* const mBuffer;
};
-class MemoryFromDevice : public Memory {
+class MemoryFromDevice : public RuntimeMemory {
public:
// Create a memory object to keep track of a driver-allocated device memory.
// The memory is recognized by the driver via a token.
//
// On success, returns ANEURALNETWORKS_NO_ERROR and a memory object.
// On error, returns the appropriate NNAPI error code and nullptr.
- static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<hal::IBuffer> buffer,
+ static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<V1_3::IBuffer> buffer,
uint32_t token);
// prefer using MemoryFromDevice::create
- MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token);
+ MemoryFromDevice(sp<V1_3::IBuffer> buffer, uint32_t token);
};
-using MemoryTracker = ObjectTracker<Memory>;
+using MemoryTracker = ObjectTracker<RuntimeMemory>;
} // namespace nn
} // namespace android
diff --git a/runtime/ModelArgumentInfo.cpp b/runtime/ModelArgumentInfo.cpp
index cf24004..a6a8908 100644
--- a/runtime/ModelArgumentInfo.cpp
+++ b/runtime/ModelArgumentInfo.cpp
@@ -30,8 +30,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
static const std::pair<int, ModelArgumentInfo> kBadDataModelArgumentInfo{ANEURALNETWORKS_BAD_DATA,
{}};
@@ -98,33 +96,33 @@
mDimensions = operand.dimensions;
} else {
const uint32_t count = newType->dimensionCount;
- mDimensions = hidl_vec<uint32_t>(count);
+ mDimensions = std::vector<uint32_t>(count);
std::copy(&newType->dimensions[0], &newType->dimensions[count], mDimensions.begin());
}
return ANEURALNETWORKS_NO_ERROR;
}
-hidl_vec<RequestArgument> createRequestArguments(
+std::vector<Request::Argument> createRequestArguments(
const std::vector<ModelArgumentInfo>& argumentInfos,
const std::vector<DataLocation>& ptrArgsLocations) {
const size_t count = argumentInfos.size();
- hidl_vec<RequestArgument> ioInfos(count);
+ std::vector<Request::Argument> ioInfos(count);
uint32_t ptrArgsIndex = 0;
for (size_t i = 0; i < count; i++) {
const auto& info = argumentInfos[i];
switch (info.state()) {
case ModelArgumentInfo::POINTER:
- ioInfos[i] = {.hasNoValue = false,
+ ioInfos[i] = {.lifetime = Request::Argument::LifeTime::POOL,
.location = ptrArgsLocations[ptrArgsIndex++],
.dimensions = info.dimensions()};
break;
case ModelArgumentInfo::MEMORY:
- ioInfos[i] = {.hasNoValue = false,
+ ioInfos[i] = {.lifetime = Request::Argument::LifeTime::POOL,
.location = info.locationAndLength(),
.dimensions = info.dimensions()};
break;
case ModelArgumentInfo::HAS_NO_VALUE:
- ioInfos[i] = {.hasNoValue = true};
+ ioInfos[i] = {.lifetime = Request::Argument::LifeTime::NO_VALUE};
break;
default:
CHECK(false);
diff --git a/runtime/ModelArgumentInfo.h b/runtime/ModelArgumentInfo.h
index 22dd34c..d0e2bb0 100644
--- a/runtime/ModelArgumentInfo.h
+++ b/runtime/ModelArgumentInfo.h
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "Utils.h"
@@ -38,10 +37,10 @@
ModelArgumentInfo() {}
static std::pair<int, ModelArgumentInfo> createFromPointer(
- const hal::Operand& operand, const ANeuralNetworksOperandType* type,
+ const Operand& operand, const ANeuralNetworksOperandType* type,
void* data /* nullptr means HAS_NO_VALUE */, uint32_t length);
static std::pair<int, ModelArgumentInfo> createFromMemory(
- const hal::Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex,
+ const Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex,
uint32_t offset, uint32_t length);
enum State { POINTER, MEMORY, HAS_NO_VALUE, UNSPECIFIED };
@@ -78,17 +77,17 @@
return mLocationAndLength.length;
}
- const hal::DataLocation& locationAndLength() const {
+ const DataLocation& locationAndLength() const {
CHECK_EQ(mState, MEMORY);
return mLocationAndLength;
}
- hal::DataLocation& locationAndLength() {
+ DataLocation& locationAndLength() {
CHECK_EQ(mState, MEMORY);
return mLocationAndLength;
}
private:
- int updateDimensionInfo(const hal::Operand& operand, const ANeuralNetworksOperandType* newType);
+ int updateDimensionInfo(const Operand& operand, const ANeuralNetworksOperandType* newType);
// Whether the argument was specified as being in a Memory, as a pointer,
// has no value, or has not been specified.
@@ -101,16 +100,16 @@
// mDimensions is valid.
State mState = UNSPECIFIED; // fixed at creation
void* mBuffer = nullptr; // fixed at creation
- hal::DataLocation mLocationAndLength; // can be updated after creation
+ DataLocation mLocationAndLength; // can be updated after creation
std::vector<uint32_t> mDimensions; // can be updated after creation
bool mIsSufficient = true; // can be updated after creation
};
-// Convert ModelArgumentInfo to HIDL RequestArgument. For pointer arguments, use the location
+// Convert ModelArgumentInfo to canonical Request::Argument. For pointer arguments, use the location
// information in ptrArgsLocations.
-hal::hidl_vec<hal::RequestArgument> createRequestArguments(
+std::vector<Request::Argument> createRequestArguments(
const std::vector<ModelArgumentInfo>& argumentInfos,
- const std::vector<hal::DataLocation>& ptrArgsLocations);
+ const std::vector<DataLocation>& ptrArgsLocations);
} // namespace nn
} // namespace android
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
index ab63f62..0c506d5 100644
--- a/runtime/ModelBuilder.cpp
+++ b/runtime/ModelBuilder.cpp
@@ -35,8 +35,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
// The maximum number of operands and operations that a model may have.
const uint32_t MAX_NUMBER_OF_OPERANDS = 0xFFFFFFFE;
const uint32_t MAX_NUMBER_OF_OPERATIONS = 0xFFFFFFFE;
@@ -66,7 +64,7 @@
}
OperandType operandType = static_cast<OperandType>(type.type);
- if (isExtensionOperandType(operandType) && !TypeManager::get()->areExtensionsAllowed()) {
+ if (isExtension(operandType) && !TypeManager::get()->areExtensionsAllowed()) {
LOG(ERROR) << "Extensions are not supported for this process.";
return ANEURALNETWORKS_BAD_DATA;
}
@@ -77,9 +75,9 @@
}
const Extension::OperandTypeInformation* info = nullptr;
- if (isExtensionOperandType(operandType) &&
+ if (isExtension(operandType) &&
!TypeManager::get()->getExtensionOperandTypeInfo(operandType, &info)) {
- LOG(ERROR) << "Extension operand type " << toString(operandType) << " is not registered";
+ LOG(ERROR) << "Extension operand type " << operandType << " is not registered";
return ANEURALNETWORKS_BAD_DATA;
}
NN_RETURN_IF_ERROR(validateOperandType(type, info, "ANeuralNetworksModel_addOperand", true));
@@ -92,13 +90,12 @@
mOperands.push_back({
.type = operandType,
.dimensions =
- hidl_vec<uint32_t>(type.dimensions, type.dimensions + type.dimensionCount),
- .numberOfConsumers = 0,
+ std::vector<uint32_t>(type.dimensions, type.dimensions + type.dimensionCount),
.scale = type.scale,
.zeroPoint = type.zeroPoint,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .lifetime = Operand::LifeTime::TEMPORARY_VARIABLE,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
- .extraParams = OperandExtraParams(),
+ .extraParams = {},
});
mHasOEMOperand |= isOemOperand;
return ANEURALNETWORKS_NO_ERROR;
@@ -122,7 +119,7 @@
"not 0";
return ANEURALNETWORKS_BAD_DATA;
}
- operand.lifetime = OperandLifeTime::NO_VALUE;
+ operand.lifetime = Operand::LifeTime::NO_VALUE;
// The location is unused and is set to zeros.
operand.location = {.poolIndex = 0, .offset = 0, .length = 0};
} else {
@@ -150,14 +147,14 @@
uint32_t existingSize = static_cast<uint32_t>(mSmallOperandValues.size());
uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength);
mSmallOperandValues.resize(existingSize + extraBytes + valueLength);
- operand.lifetime = OperandLifeTime::CONSTANT_COPY;
+ operand.lifetime = Operand::LifeTime::CONSTANT_COPY;
operand.location = {
.poolIndex = 0, .offset = existingSize + extraBytes, .length = valueLength};
memcpy(&mSmallOperandValues[operand.location.offset], buffer, valueLength);
VLOG(MODEL) << "Copied small value to offset " << operand.location.offset;
} else {
VLOG(MODEL) << "Saving large value";
- operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
+ operand.lifetime = Operand::LifeTime::CONSTANT_REFERENCE;
// The values for poolIndex and offset will be set when the model is finished.
typedef decltype(operand.location.poolIndex) PoolIndexType;
typedef decltype(operand.location.offset) OffsetType;
@@ -191,7 +188,7 @@
return ANEURALNETWORKS_BAD_DATA;
}
Operand& operand = mOperands[index];
- operand.lifetime = OperandLifeTime::SUBGRAPH;
+ operand.lifetime = Operand::LifeTime::SUBGRAPH;
operand.location = {
.poolIndex = 0,
.offset = static_cast<uint32_t>(mReferencedModels.size()),
@@ -216,17 +213,17 @@
Operand& operand = mOperands[index];
if (!validateOperandSymmPerChannelQuantParams(
- operand, channelQuant,
+ convertToV1_3(operand), channelQuant,
"ANeuralNetworksModel_setOperandSymmPerChannelQuantParams")) {
return ANEURALNETWORKS_BAD_DATA;
}
switch (operand.type) {
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
- operand.extraParams.channelQuant({
- .scales = hidl_vec<float>(channelQuant.scales,
- channelQuant.scales + channelQuant.scaleCount),
+ operand.extraParams = Operand::SymmPerChannelQuantParams{
+ .scales = std::vector<float>(channelQuant.scales,
+ channelQuant.scales + channelQuant.scaleCount),
.channelDim = channelQuant.channelDim,
- });
+ };
break;
default:
LOG(ERROR) << "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams "
@@ -258,7 +255,7 @@
<< "is zero";
return ANEURALNETWORKS_BAD_DATA;
}
- if (!isExtensionOperandType(operand.type)) {
+ if (!isExtension(operand.type)) {
LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData "
<< "setting extension data for a base operand type "
<< static_cast<int32_t>(operand.type);
@@ -266,11 +263,11 @@
}
if (data == nullptr) {
- operand.extraParams.none();
+ operand.extraParams = {};
} else {
- operand.extraParams.extension(
- hidl_vec<uint8_t>(reinterpret_cast<const uint8_t*>(data),
- reinterpret_cast<const uint8_t*>(data) + length));
+ operand.extraParams = Operand::ExtensionParams(
+ std::vector<uint8_t>(reinterpret_cast<const uint8_t*>(data),
+ reinterpret_cast<const uint8_t*>(data) + length));
}
return ANEURALNETWORKS_NO_ERROR;
}
@@ -283,7 +280,7 @@
size_t poolSize = 0;
for (LargeValue& l : mLargeOperandValues) {
Operand& operand = mOperands[l.operandIndex];
- nnAssert(operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE);
+ CHECK_EQ(operand.lifetime, Operand::LifeTime::CONSTANT_REFERENCE);
poolSize += alignBytesNeeded(poolSize, operand.location.length);
operand.location.offset = poolSize;
poolSize += operand.location.length;
@@ -308,8 +305,8 @@
return ANEURALNETWORKS_NO_ERROR;
}
-int ModelBuilder::setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
- size_t length) {
+int ModelBuilder::setOperandValueFromMemory(uint32_t index, const RuntimeMemory* memory,
+ uint32_t offset, size_t length) {
VLOG(MODEL) << __func__ << " for operand " << index << " offset " << offset << " size "
<< length;
if (badState("setOperandValueFromMemory")) {
@@ -339,7 +336,7 @@
nullptr, offset, length)) {
return ANEURALNETWORKS_BAD_DATA;
}
- operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
+ operand.lifetime = Operand::LifeTime::CONSTANT_REFERENCE;
operand.location = {.poolIndex = mMemories.add(memory),
.offset = offset,
.length = static_cast<uint32_t>(length)};
@@ -354,7 +351,7 @@
}
OperationType operationType = static_cast<OperationType>(type);
- if (isExtensionOperationType(operationType) && !TypeManager::get()->areExtensionsAllowed()) {
+ if (isExtension(operationType) && !TypeManager::get()->areExtensionsAllowed()) {
LOG(ERROR) << "Extensions are not supported for this process.";
return ANEURALNETWORKS_BAD_DATA;
}
@@ -362,7 +359,7 @@
LOG(WARNING) << "OEM_OPERATION is deprecated. Use Extensions instead.";
}
- if (!isExtensionOperationType(operationType)) {
+ if (!isExtension(operationType)) {
if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM, type)) {
LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operation type " << type;
return ANEURALNETWORKS_BAD_DATA;
@@ -370,8 +367,8 @@
}
auto isValidSubgraphReference = [this](const Operand& modelOperand) -> bool {
- NN_RET_CHECK(modelOperand.type == OperandType::SUBGRAPH)
- << "Unexpected operand type: " << toString(modelOperand.type);
+ NN_RET_CHECK_EQ(modelOperand.type, OperandType::SUBGRAPH)
+ << "Unexpected operand type: " << modelOperand.type;
NN_RET_CHECK_LT(modelOperand.location.offset, referencedModelCount())
<< "Invalid subgraph model reference";
return true;
@@ -405,14 +402,11 @@
mOperations.push_back({
.type = operationType,
- .inputs = hidl_vec<uint32_t>(inputs, inputs + inputCount),
- .outputs = hidl_vec<uint32_t>(outputs, outputs + outputCount),
+ .inputs = std::vector<uint32_t>(inputs, inputs + inputCount),
+ .outputs = std::vector<uint32_t>(outputs, outputs + outputCount),
});
- for (uint32_t i : mOperations.back().inputs) {
- mOperands[i].numberOfConsumers++;
- }
mHasOEMOperation |= (operationType == OperationType::OEM_OPERATION);
- mHasExtensionOperation |= isExtensionOperationType(operationType);
+ mHasExtensionOperation |= isExtension(operationType);
return ANEURALNETWORKS_NO_ERROR;
}
@@ -437,7 +431,7 @@
// Makes a copy of the index list, validates the arguments, and changes
// the lifetime info of the corresponding operand.
auto setArguments = [&](std::vector<uint32_t>* indexVector, uint32_t indexCount,
- const uint32_t* indexList, OperandLifeTime lifetime) -> bool {
+ const uint32_t* indexList, Operand::LifeTime lifetime) -> bool {
indexVector->resize(indexCount);
for (uint32_t i = 0; i < indexCount; i++) {
const uint32_t operandIndex = indexList[i];
@@ -451,7 +445,7 @@
}
(*indexVector)[i] = operandIndex;
Operand& operand = mOperands[operandIndex];
- if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE) {
+ if (operand.lifetime != Operand::LifeTime::TEMPORARY_VARIABLE) {
LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set operand "
<< operandIndex
<< " to be an input or output. Check that it's not a constant or "
@@ -463,8 +457,8 @@
return true;
};
- if (!setArguments(&mInputIndexes, inputCount, inputs, OperandLifeTime::SUBGRAPH_INPUT) ||
- !setArguments(&mOutputIndexes, outputCount, outputs, OperandLifeTime::SUBGRAPH_OUTPUT)) {
+ if (!setArguments(&mInputIndexes, inputCount, inputs, Operand::LifeTime::SUBGRAPH_INPUT) ||
+ !setArguments(&mOutputIndexes, outputCount, outputs, Operand::LifeTime::SUBGRAPH_OUTPUT)) {
return ANEURALNETWORKS_BAD_DATA;
}
@@ -523,8 +517,8 @@
// NOTE: Must copyLargeValuesToSharedMemory() before validation; otherwise,
// a CONSTANT_REFERENCE operand will not have correct .poolIndex, and
// validation will not work properly.
- const Model modelForValidation = makeHidlModel();
- if (!validateModel(modelForValidation, ValidationMode::RUNTIME)) {
+ const Model modelForValidation = makeModel();
+ if (!validateModel(convertToV1_3(modelForValidation), ValidationMode::RUNTIME)) {
LOG(ERROR) << "ANeuralNetworksModel_finish called on invalid model";
mInvalidModel = true;
return ANEURALNETWORKS_BAD_DATA;
@@ -542,12 +536,12 @@
static void logRemoval(const Operation& operation, uint32_t count,
const std::vector<Operand>& operands) {
std::ostringstream message;
- message << "Operation " << toString(operation.type) << " with inputs {";
+ message << "Operation " << operation.type << " with inputs {";
for (uint32_t i = 0; i < operation.inputs.size(); ++i) {
if (i != 0) {
message << ", ";
}
- message << toString(operands[operation.inputs[i]].type);
+ message << operands[operation.inputs[i]].type;
}
message << "} has trailing optional inputs set to default values. Removing " << count
<< " trailing inputs.";
@@ -566,9 +560,6 @@
const uint32_t inputCount = operation.inputs.size();
CHECK_LT(count, inputCount);
const uint32_t newInputCount = inputCount - count;
- for (uint32_t i = newInputCount; i < inputCount; ++i) {
- --mOperands[operation.inputs[i]].numberOfConsumers;
- }
operation.inputs.resize(newInputCount);
}
}
@@ -583,12 +574,16 @@
// See countMatchingTrailingArguments().
static bool matchesSpec(TailSpec spec, const Operand& operand,
const std::vector<uint8_t>& mSmallOperandValues) {
- if (operand.lifetime != OperandLifeTime::CONSTANT_COPY) {
+ const void* valuePtr = nullptr;
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
+ valuePtr = static_cast<const void*>(&mSmallOperandValues[operand.location.offset]);
+ } else if (operand.lifetime == Operand::LifeTime::POINTER) {
+ valuePtr = std::get<const void*>(operand.location.pointer);
+ } else {
// CONSTANT_REFERENCE operands are not supported to avoid mapping memory
// during compilation.
return false;
}
- auto valuePtr = static_cast<const void*>(&mSmallOperandValues[operand.location.offset]);
switch (spec) {
case TailSpec::BOOL_FALSE:
return operand.type == OperandType::BOOL &&
@@ -818,8 +813,8 @@
count = 0;
for (uint32_t operandIndex : mOperations[operationIndex].inputs) {
auto lifetime = mOperands[operandIndex].lifetime;
- if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
count++;
operandToOperations.insert(
std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
@@ -865,38 +860,38 @@
return true;
}
-// A helper class to simplify state management when creating a HIDL model.
-class ModelBuilder::HidlModelMaker {
+// A helper class to simplify state management when creating a Model.
+class ModelBuilder::ModelMaker {
public:
static Model run(const ModelBuilder* model);
private:
- static Subgraph makeSubgraph(const ModelBuilder* model);
- HidlModelMaker() {}
- Model makeHidlModel(const ModelBuilder* mainModel);
+ static Model::Subgraph makeSubgraph(const ModelBuilder* model);
+ ModelMaker() {}
+ Model makeModel(const ModelBuilder* mainModel);
uint32_t addSubgraph(const ModelBuilder* refModel);
- void updateOperandLocations(const ModelBuilder* refModel, Subgraph* subgraph);
+ void updateOperandLocations(const ModelBuilder* refModel, Model::Subgraph* subgraph);
void addExtensions(const ModelBuilder* model);
void addExtensionWithPrefix(uint16_t prefix);
- std::vector<Subgraph> mRefSubgraphs;
- std::vector<uint8_t> mOperandValues;
+ std::vector<Model::Subgraph> mRefSubgraphs;
+ Model::OperandValues mOperandValues;
MemoryTracker mMemories;
- std::vector<ExtensionNameAndPrefix> mExtensionNameToPrefix;
+ std::vector<Model::ExtensionNameAndPrefix> mExtensionNameToPrefix;
std::set<uint16_t> mPrefixSet;
};
-Model ModelBuilder::makeHidlModel() const {
- // TODO: Cache the HIDL model to speed up subsequent calls.
- return HidlModelMaker::run(this);
+Model ModelBuilder::makeModel() const {
+ // TODO: Cache the Model to speed up subsequent calls.
+ return ModelMaker::run(this);
}
-Model ModelBuilder::HidlModelMaker::run(const ModelBuilder* model) {
- // run() ensures the state of HidlModelMaker is destroyed after the call.
- return HidlModelMaker().makeHidlModel(model);
+Model ModelBuilder::ModelMaker::run(const ModelBuilder* model) {
+ // run() ensures the state of ModelMaker is destroyed after the call.
+ return ModelMaker().makeModel(model);
}
-Model ModelBuilder::HidlModelMaker::makeHidlModel(const ModelBuilder* mainModel) {
+Model ModelBuilder::ModelMaker::makeModel(const ModelBuilder* mainModel) {
addExtensions(mainModel);
Model model;
model.main = makeSubgraph(mainModel);
@@ -905,14 +900,14 @@
model.operandValues = std::move(mOperandValues);
model.pools.resize(mMemories.size());
std::transform(mMemories.begin(), mMemories.end(), model.pools.begin(),
- [](const Memory* m) { return m->getHidlMemory(); });
+ [](const RuntimeMemory* m) { return uncheckedConvert(m->getHidlMemory()); });
model.relaxComputationFloat32toFloat16 = mainModel->mRelaxComputationFloat32toFloat16;
model.extensionNameToPrefix = std::move(mExtensionNameToPrefix);
return model;
}
-Subgraph ModelBuilder::HidlModelMaker::makeSubgraph(const ModelBuilder* model) {
- Subgraph subgraph;
+Model::Subgraph ModelBuilder::ModelMaker::makeSubgraph(const ModelBuilder* model) {
+ Model::Subgraph subgraph;
subgraph.operands = model->mOperands;
subgraph.operations = model->mOperations;
subgraph.inputIndexes = model->mInputIndexes;
@@ -920,27 +915,22 @@
return subgraph;
}
-void ModelBuilder::HidlModelMaker::updateOperandLocations(const ModelBuilder* refModel,
- Subgraph* subgraph) {
+void ModelBuilder::ModelMaker::updateOperandLocations(const ModelBuilder* refModel,
+ Model::Subgraph* subgraph) {
for (Operand& operand : subgraph->operands) {
- if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
uint32_t valueLength = operand.location.length;
- uint32_t existingSize = mOperandValues.size();
- uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength);
uint32_t originalOffset = operand.location.offset;
- uint32_t offset = existingSize + extraBytes;
- mOperandValues.resize(offset + valueLength);
- memcpy(&mOperandValues[offset], &refModel->mSmallOperandValues[originalOffset],
- valueLength);
- operand.location.offset = offset;
- } else if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ operand.location = mOperandValues.append(&refModel->mSmallOperandValues[originalOffset],
+ valueLength);
+ } else if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE) {
uint32_t originalPoolIndex = operand.location.poolIndex;
operand.location.poolIndex = mMemories.add(refModel->mMemories[originalPoolIndex]);
}
}
// Do recursive calls at the end to improve locality of mOperandValues.
for (Operand& operand : subgraph->operands) {
- if (operand.lifetime == OperandLifeTime::SUBGRAPH) {
+ if (operand.lifetime == Operand::LifeTime::SUBGRAPH) {
uint32_t refModelIndex = operand.location.offset;
// TODO(b/147875885): Avoid creating duplicate refSubgraphs when
// a single refModel is referenced multiple times.
@@ -949,23 +939,22 @@
}
}
-uint32_t ModelBuilder::HidlModelMaker::addSubgraph(const ModelBuilder* refModel) {
+uint32_t ModelBuilder::ModelMaker::addSubgraph(const ModelBuilder* refModel) {
uint32_t index = mRefSubgraphs.size();
mRefSubgraphs.push_back(makeSubgraph(refModel));
updateOperandLocations(refModel, &mRefSubgraphs.back());
return index;
}
-void ModelBuilder::HidlModelMaker::addExtensions(const ModelBuilder* model) {
- constexpr uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
+void ModelBuilder::ModelMaker::addExtensions(const ModelBuilder* model) {
for (const auto& operand : model->mOperands) {
- if (isExtensionOperandType(operand.type)) {
- addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kLowBitsType);
+ if (isExtension(operand.type)) {
+ addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kExtensionTypeBits);
}
}
for (const auto& operation : model->mOperations) {
- if (isExtensionOperationType(operation.type)) {
- addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kLowBitsType);
+ if (isExtension(operation.type)) {
+ addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kExtensionTypeBits);
}
}
for (const auto& refModel : model->mReferencedModels) {
@@ -973,7 +962,7 @@
}
}
-void ModelBuilder::HidlModelMaker::addExtensionWithPrefix(uint16_t prefix) {
+void ModelBuilder::ModelMaker::addExtensionWithPrefix(uint16_t prefix) {
if (!mPrefixSet.insert(prefix).second) {
return;
}
diff --git a/runtime/ModelBuilder.h b/runtime/ModelBuilder.h
index 2de68b3..9dd93ff 100644
--- a/runtime/ModelBuilder.h
+++ b/runtime/ModelBuilder.h
@@ -23,7 +23,6 @@
#include <memory>
#include <vector>
-#include "HalInterfaces.h"
#include "Memory.h"
#include "NeuralNetworks.h"
#include "Utils.h"
@@ -34,7 +33,7 @@
class CompilationBuilder;
class Device;
class ExecutionPlan;
-class Memory;
+class RuntimeMemory;
class ModelBuilder {
public:
@@ -44,7 +43,7 @@
// Adds an operand to the model.
int addOperand(const ANeuralNetworksOperandType& type);
int setOperandValue(uint32_t index, const void* buffer, size_t length);
- int setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
+ int setOperandValueFromMemory(uint32_t index, const RuntimeMemory* memory, uint32_t offset,
size_t length);
int setOperandValueFromModel(uint32_t index, const ModelBuilder* value);
int setOperandSymmPerChannelQuantParams(
@@ -72,7 +71,7 @@
const std::vector<std::shared_ptr<Device>>& devices,
bool explicitDeviceList = false);
- hal::Model makeHidlModel() const;
+ Model makeModel() const;
uint32_t operandCount() const {
// We don't allow more than uint32_t worth of operands
@@ -89,7 +88,7 @@
return mInputIndexes[i];
}
const std::vector<uint32_t>& getInputOperandIndexes() const { return mInputIndexes; }
- const hal::Operand& getInputOperand(uint32_t i) const {
+ const Operand& getInputOperand(uint32_t i) const {
uint32_t index = getInputOperandIndex(i);
CHECK_LT(index, mOperands.size());
return mOperands[index];
@@ -99,15 +98,15 @@
return mOutputIndexes[i];
}
const std::vector<uint32_t>& getOutputOperandIndexes() const { return mOutputIndexes; }
- const hal::Operand& getOutputOperand(uint32_t i) const {
+ const Operand& getOutputOperand(uint32_t i) const {
uint32_t index = getOutputOperandIndex(i);
CHECK_LT(index, mOperands.size());
return mOperands[index];
}
- const hal::Operand& getOperand(uint32_t index) const { return mOperands[index]; }
- const hal::Operation& getOperation(uint32_t index) const { return mOperations[index]; }
+ const Operand& getOperand(uint32_t index) const { return mOperands[index]; }
+ const Operation& getOperation(uint32_t index) const { return mOperations[index]; }
const MemoryTracker& getMemories() const { return mMemories; }
- const std::vector<hal::Operation>& getOperations() const { return mOperations; }
+ const std::vector<Operation>& getOperations() const { return mOperations; }
const std::vector<uint32_t>& getSortedOperationMapping() const {
return mSortedOperationIndexMap;
}
@@ -121,8 +120,8 @@
CHECK_LT(i, mReferencedModels.size());
return mReferencedModels[i];
}
- const ModelBuilder* getReferencedModel(const hal::Operand& operand) const {
- CHECK(operand.lifetime == hal::OperandLifeTime::SUBGRAPH);
+ const ModelBuilder* getReferencedModel(const Operand& operand) const {
+ CHECK(operand.lifetime == Operand::LifeTime::SUBGRAPH);
return getReferencedModel(operand.location.offset);
}
@@ -174,7 +173,7 @@
// optional arguments are set to default values. This transformation enables
// more drivers to execute the model. See http://b/147105700.
void removeTrailingArgumentsWithDefaultValues();
- uint32_t getNumTrailingArgumentsToRemove(const hal::Operation& operation) const;
+ uint32_t getNumTrailingArgumentsToRemove(const Operation& operation) const;
// Sorts the operations to be in the correct order for single threaded
// node-at-a-time execution.
@@ -184,7 +183,7 @@
int copyLargeValuesToSharedMemory();
// The operations of the graph.
- std::vector<hal::Operation> mOperations;
+ std::vector<Operation> mOperations;
// The mapping from sorted index to the original index of operations in mOperations.
// mSortedOperationIndexMap is empty before sortIntoRunOrder() is called.
std::vector<uint32_t> mSortedOperationIndexMap;
@@ -193,7 +192,7 @@
// Is at least one of those operations an extension operation?
bool mHasExtensionOperation = false;
// The description of the operands of the graph.
- std::vector<hal::Operand> mOperands;
+ std::vector<Operand> mOperands;
// Is at least one of those operands an OEM operand?
bool mHasOEMOperand = false;
// The indexes of input operands of the model.
@@ -233,7 +232,7 @@
// Models referenced by operands in this model.
std::vector<const ModelBuilder*> mReferencedModels;
- class HidlModelMaker;
+ class ModelMaker;
};
} // namespace nn
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index f5206c8..0ccb646 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -22,6 +22,7 @@
#include "NeuralNetworks.h"
+#include <nnapi/Types.h>
#include <vndk/hardware_buffer.h>
#include <algorithm>
@@ -35,7 +36,6 @@
#include "ControlFlow.h"
#include "Event.h"
#include "ExecutionBuilder.h"
-#include "HalInterfaces.h"
#include "Manager.h"
#include "Memory.h"
#include "MetaModel.h"
@@ -46,7 +46,7 @@
#include "Utils.h"
using namespace android::nn;
-using namespace android::nn::hal;
+using android::sp;
// Make sure the constants defined in the header files have not changed values.
// IMPORTANT: When adding new values, update kNumberOfDataTypes or kNumberOfDataTypesOEM
@@ -558,12 +558,14 @@
// Make sure that the constants are compatible with the values defined in
// hardware/interfaces/neuralnetworks/1.3/types.hal.
-static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_LOW) == Priority::LOW,
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_LOW) ==
+ Priority::LOW,
"ANEURALNETWORKS_PRIORITY_LOW does not map to Priority::LOW");
-static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) ==
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) ==
Priority::MEDIUM,
"ANEURALNETWORKS_PRIORITY_MEDIUM does not map to Priority::MEDIUM");
-static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_HIGH) == Priority::HIGH,
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_HIGH) ==
+ Priority::HIGH,
"ANEURALNETWORKS_PRIORITY_HIGH does not map to Priority::HIGH");
// Asserts for ANeuralNetworksOperandType memory layout
@@ -597,9 +599,8 @@
// Asserts for compilation caching
static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == 32,
"ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN has changed");
-static_assert(static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) ==
- ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN,
- "Constant::BYTE_SIZE_OF_CACHE_TOKEN != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN");
+static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == kByteSizeOfCacheToken,
+ "ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN != kByteSizeOfCacheToken");
// Asserts for compilation priority
static_assert(ANEURALNETWORKS_PRIORITY_LOW == 90, "ANEURALNETWORKS_PRIORITY_LOW has changed");
@@ -609,14 +610,6 @@
static_assert(ANEURALNETWORKS_PRIORITY_DEFAULT == ANEURALNETWORKS_PRIORITY_MEDIUM,
"ANEURALNETWORKS_PRIORITY_DEFAULT has changed");
-// Asserts for loop timeout duration
-static_assert(static_cast<uint64_t>(LoopTimeoutDurationNs::DEFAULT) ==
- operation_while::kTimeoutNsDefault,
- "LoopTimeoutDurationNs::DEFAULT != operation_while::kTimeoutNsDefault");
-static_assert(static_cast<uint64_t>(LoopTimeoutDurationNs::MAXIMUM) ==
- operation_while::kTimeoutNsMaximum,
- "LoopTimeoutDurationNs::MAXIMUM != operation_while::kTimeoutNsMaximum");
-
int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) {
if (numDevices == nullptr) {
LOG(ERROR) << "ANeuralNetworks_getDeviceCount passed a nullptr";
@@ -718,7 +711,7 @@
return ANEURALNETWORKS_BAD_STATE;
}
- const Model hidlModel = m->makeHidlModel();
+ const Model canonicalModel = m->makeModel();
const std::vector<uint32_t>& opMap = m->getSortedOperationMapping();
// init the output array to false for all the operations.
std::fill(supportedOps, supportedOps + opMap.size(), false);
@@ -737,7 +730,7 @@
}
Device* d = reinterpret_cast<Device*>(const_cast<ANeuralNetworksDevice*>(devices[i]));
- const MetaModel metaModel(hidlModel, DeviceManager::get()->strictSlicing());
+ const MetaModel metaModel(canonicalModel, DeviceManager::get()->strictSlicing());
const std::vector<bool> supportsByDevice = d->getSupportedOperations(metaModel);
for (uint32_t j = 0; j < supportsByDevice.size(); j++) {
uint32_t originalIdx = opMap[j];
@@ -988,9 +981,9 @@
LOG(ERROR) << "ANeuralNetworksMemory_copy passed a nullptr";
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- const Memory* s = reinterpret_cast<const Memory*>(src);
- const Memory* d = reinterpret_cast<const Memory*>(dst);
- return Memory::copy(*s, *d);
+ const RuntimeMemory* s = reinterpret_cast<const RuntimeMemory*>(src);
+ const RuntimeMemory* d = reinterpret_cast<const RuntimeMemory*>(dst);
+ return RuntimeMemory::copy(*s, *d);
}
int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd, size_t offset,
@@ -1024,7 +1017,7 @@
void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksMemory_free");
// No validation. Free of nullptr is valid.
- Memory* m = reinterpret_cast<Memory*>(memory);
+ RuntimeMemory* m = reinterpret_cast<RuntimeMemory*>(memory);
delete m;
}
@@ -1091,7 +1084,7 @@
LOG(ERROR) << "ANeuralNetworksModel_setOperandValue passed a nullptr";
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- const Memory* mem = reinterpret_cast<const Memory*>(memory);
+ const RuntimeMemory* mem = reinterpret_cast<const RuntimeMemory*>(memory);
ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
return m->setOperandValueFromMemory(index, mem, offset, length);
}
@@ -1302,7 +1295,7 @@
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- const Memory* m = reinterpret_cast<const Memory*>(memory);
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory);
ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
return r->setInputFromMemory(index, type, m, offset, length);
}
@@ -1330,7 +1323,7 @@
}
ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
- const Memory* m = reinterpret_cast<const Memory*>(memory);
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory);
return r->setOutputFromMemory(index, type, m, offset, length);
}
diff --git a/runtime/TypeManager.cpp b/runtime/TypeManager.cpp
index 03fac20..932dcc7 100644
--- a/runtime/TypeManager.cpp
+++ b/runtime/TypeManager.cpp
@@ -48,11 +48,7 @@
namespace {
-using namespace hal;
-
-const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
-const uint32_t kMaxPrefix =
- (1 << static_cast<uint8_t>(ExtensionTypeEncoding::HIGH_BITS_PREFIX)) - 1;
+constexpr uint32_t kMaxPrefix = (1 << kExtensionPrefixBits) - 1;
// Checks if the two structures contain the same information. The order of
// operand types within the structures does not matter.
@@ -235,7 +231,7 @@
int32_t* type) {
uint16_t prefix;
NN_RET_CHECK(getExtensionPrefix(extensionName, &prefix));
- *type = (prefix << kLowBitsType) | typeWithinExtension;
+ *type = (prefix << kExtensionTypeBits) | typeWithinExtension;
return true;
}
@@ -249,8 +245,8 @@
bool TypeManager::getExtensionOperandTypeInfo(
OperandType type, const Extension::OperandTypeInformation** info) const {
uint32_t operandType = static_cast<uint32_t>(type);
- uint16_t prefix = operandType >> kLowBitsType;
- uint16_t typeWithinExtension = operandType & ((1 << kLowBitsType) - 1);
+ uint16_t prefix = operandType >> kExtensionTypeBits;
+ uint16_t typeWithinExtension = operandType & ((1 << kExtensionTypeBits) - 1);
const Extension* extension;
NN_RET_CHECK(getExtensionInfo(prefix, &extension))
<< "Cannot find extension corresponding to prefix " << prefix;
@@ -268,7 +264,7 @@
}
bool TypeManager::isTensorType(OperandType type) const {
- if (!isExtensionOperandType(type)) {
+ if (!isExtension(type)) {
return !nonExtensionOperandTypeIsScalar(static_cast<int>(type));
}
const Extension::OperandTypeInformation* info;
@@ -278,7 +274,7 @@
uint32_t TypeManager::getSizeOfData(OperandType type,
const std::vector<uint32_t>& dimensions) const {
- if (!isExtensionOperandType(type)) {
+ if (!isExtension(type)) {
return nonExtensionOperandSizeOfData(type, dimensions);
}
const Extension::OperandTypeInformation* info;
@@ -286,9 +282,9 @@
return info->isTensor ? sizeOfTensorData(info->byteSize, dimensions) : info->byteSize;
}
-bool TypeManager::sizeOfDataOverflowsUInt32(hal::OperandType type,
+bool TypeManager::sizeOfDataOverflowsUInt32(OperandType type,
const std::vector<uint32_t>& dimensions) const {
- if (!isExtensionOperandType(type)) {
+ if (!isExtension(type)) {
return nonExtensionOperandSizeOfDataOverflowsUInt32(type, dimensions);
}
const Extension::OperandTypeInformation* info;
diff --git a/runtime/TypeManager.h b/runtime/TypeManager.h
index a06ddb6..5236ba7 100644
--- a/runtime/TypeManager.h
+++ b/runtime/TypeManager.h
@@ -48,18 +48,18 @@
// Looks up information about the extension corresponding to the given prefix
//
// Returns false if no extension corresponds to the given prefix.
- bool getExtensionInfo(uint16_t prefix, const hal::Extension** extension) const;
+ bool getExtensionInfo(uint16_t prefix, const Extension** extension) const;
// Looks up information about an extension operand type
//
// Returns false if the extension or type is unknown.
- bool getExtensionOperandTypeInfo(hal::OperandType type,
- const hal::Extension::OperandTypeInformation** info) const;
+ bool getExtensionOperandTypeInfo(OperandType type,
+ const Extension::OperandTypeInformation** info) const;
// Returns true if an operand type is a tensor type.
//
// Aborts if the type is an unknown extension type.
- bool isTensorType(hal::OperandType type) const;
+ bool isTensorType(OperandType type) const;
// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
@@ -67,7 +67,7 @@
//
// Aborts if the type is an unknown extension type.
// Aborts if the size would overflow the return type.
- uint32_t getSizeOfData(const hal::Operand& operand) const {
+ uint32_t getSizeOfData(const Operand& operand) const {
return getSizeOfData(operand.type, operand.dimensions);
}
@@ -76,14 +76,13 @@
// unspecified dimension, returns zero.
//
// Aborts if the type is an unknown extension type.
- uint32_t getSizeOfData(hal::OperandType type, const std::vector<uint32_t>& dimensions) const;
+ uint32_t getSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) const;
// Returns true if the amount of space needed to store a value of the specified
// dimensions and element size overflows the uint32_t type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32().
- bool sizeOfDataOverflowsUInt32(hal::OperandType type,
- const std::vector<uint32_t>& dimensions) const;
+ bool sizeOfDataOverflowsUInt32(OperandType type, const std::vector<uint32_t>& dimensions) const;
// Returns true if extensions usage is allowed in current process.
bool areExtensionsAllowed() const { return mExtensionsAllowed; }
@@ -93,7 +92,7 @@
// Registers an extension.
//
// Returns true if the registration was successful.
- bool forTest_registerExtension(const hal::Extension& extension) {
+ bool forTest_registerExtension(const Extension& extension) {
return registerExtension(extension, "INTERNAL TEST");
}
@@ -135,7 +134,7 @@
private:
TypeManager();
void findAvailableExtensions();
- bool registerExtension(hal::Extension extension, const std::string& deviceName);
+ bool registerExtension(Extension extension, const std::string& deviceName);
// Returns the numeric "prefix" value corresponding to an extension.
//
@@ -145,7 +144,7 @@
const DeviceManager* mDeviceManager = DeviceManager::get();
// Contains all registered extensions.
- std::map<std::string, hal::Extension> mExtensionNameToExtension;
+ std::map<std::string, Extension> mExtensionNameToExtension;
// Contains the name of the first discovered device that supports an
// extension. Used for error reporting.
@@ -160,7 +159,7 @@
std::map<std::string, uint16_t> mExtensionNameToPrefix;
// Entries of mPrefixToExtension point into mExtensionNameToExtension.
// prefix=0 corresponds to no extension and should never be looked up.
- std::vector<hal::Extension*> mPrefixToExtension = {nullptr};
+ std::vector<Extension*> mPrefixToExtension = {nullptr};
// True if Extensions can be used in current process.
bool mExtensionsAllowed = false;
diff --git a/runtime/VersionedInterfaces.cpp b/runtime/VersionedInterfaces.cpp
index ccb29dc..fce558c 100644
--- a/runtime/VersionedInterfaces.cpp
+++ b/runtime/VersionedInterfaces.cpp
@@ -18,13 +18,12 @@
#include "VersionedInterfaces.h"
-#include <fcntl.h>
-
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/scopeguard.h>
#include <android-base/thread_annotations.h>
#include <cutils/native_handle.h>
+#include <fcntl.h>
#include <algorithm>
#include <chrono>
@@ -104,18 +103,14 @@
// anonymous namespace
namespace {
-using namespace hal;
-
-const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-
-void sendFailureMessage(IPreparedModelCallback* cb) {
+void sendFailureMessage(V1_3::IPreparedModelCallback* cb) {
CHECK(cb != nullptr);
- cb->notify_1_3(ErrorStatus::GENERAL_FAILURE, nullptr);
+ cb->notify_1_3(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
}
// This class is thread safe
template <typename Callback>
-class DeathHandler : public hidl_death_recipient {
+class DeathHandler : public hardware::hidl_death_recipient {
public:
void serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) override {
LOG(ERROR) << "DeathHandler::serviceDied -- service unexpectedly died!";
@@ -164,7 +159,7 @@
// proactively handle service crashes. If the linkToDeath call fails,
// asynchronous calls are susceptible to hangs if the service crashes before
// providing the response.
- const Return<bool> ret = preparedModel->linkToDeath(deathHandler, 0);
+ const hardware::Return<bool> ret = preparedModel->linkToDeath(deathHandler, 0);
if (ret.isDeadObject()) {
LOG(ERROR) << "makeVersionedIPreparedModel failed to register a death recipient for the "
"IPreparedModel object because the IPreparedModel object is dead.";
@@ -206,10 +201,10 @@
const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
const auto failDeadObject = []() -> std::tuple<int, std::vector<OutputShape>, Timing> {
- return {ANEURALNETWORKS_DEAD_OBJECT, {}, kNoTiming};
+ return {ANEURALNETWORKS_DEAD_OBJECT, {}, {}};
};
- const auto failWithStatus = [](ErrorStatus status) {
- return getExecutionResult(status, {}, kNoTiming);
+ const auto failWithStatus = [](V1_3::ErrorStatus status) {
+ return getExecutionResult(status, {}, {});
};
const auto getResults = [failDeadObject](const ExecutionCallback& cb) {
if (cb.isDeadObject()) {
@@ -221,21 +216,23 @@
const sp<ExecutionCallback> callback = new ExecutionCallback();
const auto scoped = mDeathHandler->protectCallback(callback);
- // version 1.3+ HAL
+ // version 1.3 HAL
+ const V1_3::Request request13 = convertToV1_3(request);
if (mPreparedModelV1_3 != nullptr) {
const auto otp = makeTimePoint(deadline);
- Return<ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(request, measure, otp,
- loopTimeoutDuration, callback);
+ hardware::Return<V1_3::ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(
+ request13, convertToV1_2(measure), convertToV1_3(otp),
+ convertToV1_3(loopTimeoutDuration), callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "execute_1_3 failure: " << ret.description();
return failDeadObject();
}
if (!ret.isOk()) {
LOG(ERROR) << "execute_1_3 failure: " << ret.description();
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
- if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "execute_1_3 returned " << toString(static_cast<ErrorStatus>(ret));
+ if (ret != V1_3::ErrorStatus::NONE) {
+ LOG(ERROR) << "execute_1_3 returned " << toString(static_cast<V1_3::ErrorStatus>(ret));
return failWithStatus(ret);
}
callback->wait();
@@ -244,21 +241,21 @@
// version 1.2 HAL
if (mPreparedModelV1_2 != nullptr) {
- const bool compliant = compliantWithV1_2(request);
+ const bool compliant = compliantWithV1_2(request13);
if (!compliant) {
LOG(ERROR) << "Could not handle execute_1_2!";
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
- const V1_0::Request request12 = convertToV1_2(request);
- Return<V1_0::ErrorStatus> ret =
- mPreparedModelV1_2->execute_1_2(request12, measure, callback);
+ const V1_0::Request request12 = convertToV1_2(request13);
+ hardware::Return<V1_0::ErrorStatus> ret =
+ mPreparedModelV1_2->execute_1_2(request12, convertToV1_2(measure), callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "execute_1_2 failure: " << ret.description();
return failDeadObject();
}
if (!ret.isOk()) {
LOG(ERROR) << "execute_1_2 failure: " << ret.description();
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
if (status != V1_0::ErrorStatus::NONE) {
@@ -271,20 +268,20 @@
// version 1.0 HAL
if (mPreparedModelV1_0 != nullptr) {
- const bool compliant = compliantWithV1_0(request);
+ const bool compliant = compliantWithV1_0(request13);
if (!compliant) {
LOG(ERROR) << "Could not handle execute!";
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
- const V1_0::Request request10 = convertToV1_0(request);
- Return<V1_0::ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback);
+ const V1_0::Request request10 = convertToV1_0(request13);
+ hardware::Return<V1_0::ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "execute failure: " << ret.description();
return failDeadObject();
}
if (!ret.isOk()) {
LOG(ERROR) << "execute failure: " << ret.description();
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
if (status != V1_0::ErrorStatus::NONE) {
@@ -297,24 +294,27 @@
// No prepared model available
LOG(ERROR) << "executeAsynchronously called with no preparedModel";
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::executeSynchronously(
const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
const std::tuple<int, std::vector<OutputShape>, Timing> kDeadObject = {
- ANEURALNETWORKS_DEAD_OBJECT, {}, kNoTiming};
- const auto kFailure = getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ ANEURALNETWORKS_DEAD_OBJECT, {}, {}};
+ const auto kFailure = getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, {});
- // version 1.3+ HAL
+ // version 1.3 HAL
+ const V1_3::Request request13 = convertToV1_3(request);
if (mPreparedModelV1_3 != nullptr) {
std::tuple<int, std::vector<OutputShape>, Timing> result;
const auto otp = makeTimePoint(deadline);
- Return<void> ret = mPreparedModelV1_3->executeSynchronously_1_3(
- request, measure, otp, loopTimeoutDuration,
- [&result](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
+ hardware::Return<void> ret = mPreparedModelV1_3->executeSynchronously_1_3(
+ request13, convertToV1_2(measure), convertToV1_3(otp),
+ convertToV1_3(loopTimeoutDuration),
+ [&result](V1_3::ErrorStatus error,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
result = getExecutionResult(error, outputShapes, timing);
});
if (ret.isDeadObject()) {
@@ -330,18 +330,19 @@
// version 1.2 HAL
if (mPreparedModelV1_2 != nullptr) {
- const bool compliant = compliantWithV1_2(request);
+ const bool compliant = compliantWithV1_2(request13);
if (!compliant) {
LOG(ERROR) << "Could not handle executeSynchronously!";
return kFailure;
}
- const V1_0::Request request12 = convertToV1_2(request);
+ const V1_0::Request request12 = convertToV1_2(request13);
std::tuple<int, std::vector<OutputShape>, Timing> result;
- Return<void> ret = mPreparedModelV1_2->executeSynchronously(
- request12, measure,
- [&result](V1_0::ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
+ hardware::Return<void> ret = mPreparedModelV1_2->executeSynchronously(
+ request12, convertToV1_2(measure),
+ [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
result = getExecutionResult(convertToV1_3(error), outputShapes, timing);
});
if (ret.isDeadObject()) {
@@ -363,11 +364,11 @@
const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const {
if (preferSynchronous) {
- VLOG(EXECUTION) << "Before executeSynchronously() " << SHOW_IF_DEBUG(toString(request));
+ VLOG(EXECUTION) << "Before executeSynchronously() " << SHOW_IF_DEBUG(request);
return executeSynchronously(request, measure, deadline, loopTimeoutDuration);
}
- VLOG(EXECUTION) << "Before executeAsynchronously() " << SHOW_IF_DEBUG(toString(request));
+ VLOG(EXECUTION) << "Before executeAsynchronously() " << SHOW_IF_DEBUG(request);
return executeAsynchronously(request, measure, deadline, loopTimeoutDuration);
}
@@ -397,13 +398,15 @@
return ExecutionBurstController::create(mPreparedModelV1_2, pollingTimeWindow);
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_3::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_3::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_3");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities_1_3(
- [&result](ErrorStatus error, const Capabilities& capabilities) {
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities_1_3(
+ [&result](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) {
result = std::make_pair(error, capabilities);
});
if (!ret.isOk()) {
@@ -413,38 +416,39 @@
return result;
}
-std::tuple<int, hal::hidl_handle, sp<hal::IFencedExecutionCallback>, hal::Timing>
-VersionedIPreparedModel::executeFenced(
- const hal::Request& request, const hal::hidl_vec<hal::hidl_handle>& waitFor,
- MeasureTiming measure, const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) {
- // version 1.3+ HAL
- hal::hidl_handle syncFence;
- sp<hal::IFencedExecutionCallback> dispatchCallback;
- hal::Timing timing = {UINT64_MAX, UINT64_MAX};
+std::tuple<int, hardware::hidl_handle, sp<V1_3::IFencedExecutionCallback>, Timing>
+VersionedIPreparedModel::executeFenced(const Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ MeasureTiming measure,
+ const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) {
+ // version 1.3 HAL
+ hardware::hidl_handle syncFence;
+ sp<V1_3::IFencedExecutionCallback> dispatchCallback;
+ Timing timing = {UINT64_MAX, UINT64_MAX};
if (mPreparedModelV1_3 != nullptr) {
ErrorStatus errorStatus;
const auto otp = makeTimePoint(deadline);
- Return<void> ret = mPreparedModelV1_3->executeFenced(
- request, waitFor, measure, otp, loopTimeoutDuration, timeoutDurationAfterFence,
+ hardware::Return<void> ret = mPreparedModelV1_3->executeFenced(
+ convertToV1_3(request), waitFor, convertToV1_2(measure), convertToV1_3(otp),
+ convertToV1_3(loopTimeoutDuration), convertToV1_3(timeoutDurationAfterFence),
[&syncFence, &errorStatus, &dispatchCallback](
- ErrorStatus error, const hidl_handle& handle,
- const sp<hal::IFencedExecutionCallback>& callback) {
+ V1_3::ErrorStatus error, const hardware::hidl_handle& handle,
+ const sp<V1_3::IFencedExecutionCallback>& callback) {
syncFence = handle;
- errorStatus = error;
+ errorStatus = uncheckedConvert(error);
dispatchCallback = callback;
});
if (!ret.isOk()) {
LOG(ERROR) << "executeFenced failure: " << ret.description();
- return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
if (errorStatus != ErrorStatus::NONE) {
- LOG(ERROR) << "executeFenced returned "
- << toString(static_cast<ErrorStatus>(errorStatus));
+ LOG(ERROR) << "executeFenced returned " << errorStatus;
return std::make_tuple(convertErrorStatusToResultCode(errorStatus),
- hal::hidl_handle(nullptr), nullptr, timing);
+ hardware::hidl_handle(nullptr), nullptr, timing);
}
return std::make_tuple(ANEURALNETWORKS_NO_ERROR, syncFence, dispatchCallback, timing);
}
@@ -454,33 +458,35 @@
LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution";
for (const auto& fenceHandle : waitFor) {
if (!fenceHandle.getNativeHandle()) {
- return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
int syncFd = fenceHandle.getNativeHandle()->data[0];
if (syncFd <= 0) {
- return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
auto r = syncWait(syncFd, -1);
if (r != FenceState::SIGNALED) {
LOG(ERROR) << "syncWait failed, fd: " << syncFd;
- return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
}
int errorCode;
std::tie(errorCode, std::ignore, timing) =
executeSynchronously(request, measure, deadline, loopTimeoutDuration);
- return std::make_tuple(errorCode, hal::hidl_handle(nullptr), nullptr, timing);
+ return std::make_tuple(errorCode, hardware::hidl_handle(nullptr), nullptr, timing);
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_2::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_2::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_2");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities_1_2(
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities_1_2(
[&result](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities) {
result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
});
@@ -491,12 +497,14 @@
return result;
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_1::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_1::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_1");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities_1_1(
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities_1_1(
[&result](V1_0::ErrorStatus error, const V1_1::Capabilities& capabilities) {
// Time taken to convert capabilities is trivial
result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
@@ -508,12 +516,14 @@
return result;
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_0::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_0::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities(
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities(
[&result](V1_0::ErrorStatus error, const V1_0::Capabilities& capabilities) {
// Time taken to convert capabilities is trivial
result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
@@ -525,14 +535,16 @@
return result;
}
-static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunction(
- V1_2::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>>
+getSupportedExtensionsFunction(V1_2::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getSupportedExtensions");
- const std::pair<ErrorStatus, hidl_vec<Extension>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, hidl_vec<Extension>> result = kFailure;
- const Return<void> ret = device->getSupportedExtensions(
- [&result](V1_0::ErrorStatus error, const hidl_vec<Extension>& extensions) {
+ const std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>> result = kFailure;
+ const hardware::Return<void> ret = device->getSupportedExtensions(
+ [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<V1_2::Extension>& extensions) {
result = std::make_pair(convertToV1_3(error), extensions);
});
if (!ret.isOk()) {
@@ -542,18 +554,18 @@
return result;
}
-static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunction(
- V1_0::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>>
+getSupportedExtensionsFunction(V1_0::IDevice* device) {
CHECK(device != nullptr);
- return {ErrorStatus::NONE, {/* No extensions. */}};
+ return {V1_3::ErrorStatus::NONE, {/* No extensions. */}};
}
static int32_t getTypeFunction(V1_2::IDevice* device) {
CHECK(device != nullptr);
constexpr int32_t kFailure = -1;
int32_t result = kFailure;
- const Return<void> ret =
- device->getType([&result](V1_0::ErrorStatus error, DeviceType deviceType) {
+ const hardware::Return<void> ret =
+ device->getType([&result](V1_0::ErrorStatus error, V1_2::DeviceType deviceType) {
if (error == V1_0::ErrorStatus::NONE) {
result = static_cast<int32_t>(deviceType);
}
@@ -570,12 +582,14 @@
return ANEURALNETWORKS_DEVICE_UNKNOWN;
}
-static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_2::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_string> getVersionStringFunction(
+ V1_2::IDevice* device) {
CHECK(device != nullptr);
- const std::pair<ErrorStatus, hidl_string> kFailure = {ErrorStatus::GENERAL_FAILURE, ""};
- std::pair<ErrorStatus, hidl_string> result = kFailure;
- const Return<void> ret = device->getVersionString(
- [&result](V1_0::ErrorStatus error, const hidl_string& version) {
+ const std::pair<V1_3::ErrorStatus, hardware::hidl_string> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, ""};
+ std::pair<V1_3::ErrorStatus, hardware::hidl_string> result = kFailure;
+ const hardware::Return<void> ret = device->getVersionString(
+ [&result](V1_0::ErrorStatus error, const hardware::hidl_string& version) {
result = std::make_pair(convertToV1_3(error), version);
});
if (!ret.isOk()) {
@@ -585,18 +599,19 @@
return result;
}
-static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_0::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_string> getVersionStringFunction(
+ V1_0::IDevice* device) {
CHECK(device != nullptr);
- return {ErrorStatus::NONE, "UNKNOWN"};
+ return {V1_3::ErrorStatus::NONE, "UNKNOWN"};
}
-static std::tuple<ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
+static std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
V1_2::IDevice* device) {
CHECK(device != nullptr);
- constexpr std::tuple<ErrorStatus, uint32_t, uint32_t> kFailure = {ErrorStatus::GENERAL_FAILURE,
- 0, 0};
- std::tuple<ErrorStatus, uint32_t, uint32_t> result = kFailure;
- const Return<void> ret = device->getNumberOfCacheFilesNeeded(
+ constexpr std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, 0, 0};
+ std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> result = kFailure;
+ const hardware::Return<void> ret = device->getNumberOfCacheFilesNeeded(
[&result](V1_0::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
result = {convertToV1_3(error), numModelCache, numDataCache};
});
@@ -607,17 +622,17 @@
return result;
}
-static std::tuple<ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
+static std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
V1_0::IDevice* device) {
CHECK(device != nullptr);
- return {ErrorStatus::NONE, 0, 0};
+ return {V1_3::ErrorStatus::NONE, 0, 0};
}
struct InitialData {
- hal::Capabilities capabilities;
- hal::hidl_vec<hal::Extension> supportedExtensions;
+ V1_3::Capabilities capabilities;
+ hardware::hidl_vec<V1_2::Extension> supportedExtensions;
int32_t type;
- hal::hidl_string versionString;
+ hardware::hidl_string versionString;
std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded;
};
@@ -626,7 +641,7 @@
CHECK(device != nullptr);
auto [capabilitiesStatus, capabilities] = getCapabilitiesFunction(device);
- if (capabilitiesStatus != ErrorStatus::NONE) {
+ if (capabilitiesStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getCapabilities* returned the error "
<< toString(capabilitiesStatus);
return std::nullopt;
@@ -634,7 +649,7 @@
VLOG(MANAGER) << "Capab " << toString(capabilities);
auto [versionStatus, versionString] = getVersionStringFunction(device);
- if (versionStatus != ErrorStatus::NONE) {
+ if (versionStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getVersionString returned the error " << toString(versionStatus);
return std::nullopt;
}
@@ -647,7 +662,7 @@
}
auto [extensionsStatus, supportedExtensions] = getSupportedExtensionsFunction(device);
- if (extensionsStatus != ErrorStatus::NONE) {
+ if (extensionsStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getSupportedExtensions returned the error "
<< toString(extensionsStatus);
return std::nullopt;
@@ -655,7 +670,7 @@
const auto [cacheFilesStatus, numModelCacheFiles, numDataCacheFiles] =
getNumberOfCacheFilesNeededFunction(device);
- if (cacheFilesStatus != ErrorStatus::NONE) {
+ if (cacheFilesStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getNumberOfCacheFilesNeeded returned the error "
<< toString(cacheFilesStatus);
return std::nullopt;
@@ -663,7 +678,7 @@
// The following limit is enforced by VTS
constexpr uint32_t maxNumCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES);
if (numModelCacheFiles > maxNumCacheFiles || numDataCacheFiles > maxNumCacheFiles) {
LOG(ERROR)
<< "IDevice::getNumberOfCacheFilesNeeded returned invalid number of cache files: "
@@ -684,7 +699,7 @@
template <typename Core>
std::optional<InitialData> initialize(const Core& core) {
- // version 1.3+ HAL
+ // version 1.3 HAL
if (const auto device = core.template getDevice<V1_3::IDevice>()) {
return initializeFunction(device.get());
}
@@ -710,7 +725,7 @@
}
std::shared_ptr<VersionedIDevice> VersionedIDevice::create(std::string serviceName,
- const DeviceFactory& makeDevice) {
+ const HalDeviceFactory& makeDevice) {
CHECK(makeDevice != nullptr)
<< "VersionedIDevice::create passed invalid device factory object.";
@@ -736,15 +751,16 @@
auto [capabilities, supportedExtensions, type, versionString, numberOfCacheFilesNeeded] =
std::move(*initialData);
return std::make_shared<VersionedIDevice>(
- std::move(capabilities), std::move(supportedExtensions), type, std::move(versionString),
- numberOfCacheFilesNeeded, std::move(serviceName), makeDevice, std::move(core.value()));
+ uncheckedConvert(capabilities), uncheckedConvert(supportedExtensions), type,
+ std::move(versionString), numberOfCacheFilesNeeded, std::move(serviceName), makeDevice,
+ std::move(core.value()));
}
-VersionedIDevice::VersionedIDevice(hal::Capabilities capabilities,
- std::vector<hal::Extension> supportedExtensions, int32_t type,
+VersionedIDevice::VersionedIDevice(Capabilities capabilities,
+ std::vector<Extension> supportedExtensions, int32_t type,
std::string versionString,
std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
- std::string serviceName, const DeviceFactory& makeDevice,
+ std::string serviceName, const HalDeviceFactory& makeDevice,
Core core)
: kCapabilities(std::move(capabilities)),
kSupportedExtensions(std::move(supportedExtensions)),
@@ -765,7 +781,7 @@
// proactively handle service crashes. If the linkToDeath call fails,
// asynchronous calls are susceptible to hangs if the service crashes before
// providing the response.
- const Return<bool> ret = device->linkToDeath(deathHandler, 0);
+ const hardware::Return<bool> ret = device->linkToDeath(deathHandler, 0);
if (!ret.isOk()) {
LOG(ERROR) << "VersionedIDevice::Core::create failed to register a death recipient for the "
"IDevice object because of failure: "
@@ -828,12 +844,13 @@
}
template <typename T_Return, typename T_IDevice, typename T_Callback>
-Return<T_Return> callProtected(const char* context,
- const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
- const sp<T_IDevice>& device, const sp<T_Callback>& callback,
- const sp<IDeviceDeathHandler>& deathHandler) {
+hardware::Return<T_Return> callProtected(
+ const char* context,
+ const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
+ const sp<T_IDevice>& device, const sp<T_Callback>& callback,
+ const sp<IDeviceDeathHandler>& deathHandler) {
const auto scoped = deathHandler->protectCallback(callback);
- Return<T_Return> ret = fn(device);
+ hardware::Return<T_Return> ret = fn(device);
// Suppose there was a transport error. We have the following cases:
// 1. Either not due to a dead device, or due to a device that was
// already dead at the time of the call to protectCallback(). In
@@ -863,16 +880,16 @@
return ret;
}
template <typename T_Return, typename T_IDevice>
-Return<T_Return> callProtected(const char*,
- const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
- const sp<T_IDevice>& device, const std::nullptr_t&,
- const sp<IDeviceDeathHandler>&) {
+hardware::Return<T_Return> callProtected(
+ const char*, const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
+ const sp<T_IDevice>& device, const std::nullptr_t&, const sp<IDeviceDeathHandler>&) {
return fn(device);
}
template <typename T_Return, typename T_IDevice, typename T_Callback>
-Return<T_Return> VersionedIDevice::recoverable(
- const char* context, const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
+hardware::Return<T_Return> VersionedIDevice::recoverable(
+ const char* context,
+ const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
const T_Callback& callback) const EXCLUDES(mMutex) {
CHECK_EQ(callback == nullptr, (std::is_same_v<T_Callback, std::nullptr_t>));
@@ -880,7 +897,7 @@
sp<IDeviceDeathHandler> deathHandler;
std::tie(device, deathHandler) = getDeviceAndDeathHandler<T_IDevice>();
- Return<T_Return> ret = callProtected(context, fn, device, callback, deathHandler);
+ hardware::Return<T_Return> ret = callProtected(context, fn, device, callback, deathHandler);
if (ret.isDeadObject()) {
{
@@ -958,42 +975,42 @@
return kSupportedExtensions;
}
-std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
+std::pair<ErrorStatus, std::vector<bool>> VersionedIDevice::getSupportedOperations(
const MetaModel& metaModel) const {
- const std::pair<ErrorStatus, hidl_vec<bool>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, hidl_vec<bool>> result;
+ const std::pair<ErrorStatus, std::vector<bool>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<ErrorStatus, std::vector<bool>> result;
const Model& model = metaModel.getModel();
auto noneSupported = [&model] {
- hidl_vec<bool> supported(model.main.operations.size());
- std::fill(supported.begin(), supported.end(), false);
+ std::vector<bool> supported(model.main.operations.size(), false);
return std::make_pair(ErrorStatus::NONE, std::move(supported));
};
- auto remappedResult = [&model](const std::pair<ErrorStatus, hidl_vec<bool>>& result,
- const std::function<uint32_t(uint32_t)>&
- slicedModelOperationIndexToModelOperationIndex) {
- const ErrorStatus status = result.first;
- const hidl_vec<bool>& supported = result.second;
- hidl_vec<bool> remappedSupported(model.main.operations.size());
- std::fill(remappedSupported.begin(), remappedSupported.end(), false);
- for (size_t i = 0; i < supported.size(); ++i) {
- if (supported[i]) {
- remappedSupported[slicedModelOperationIndexToModelOperationIndex(i)] = true;
- }
- }
- return std::make_pair(status, std::move(remappedSupported));
- };
+ auto remappedResult =
+ [&model](const std::pair<ErrorStatus, std::vector<bool>>& result,
+ const MetaModel::Mapper& slicedModelOperationIndexToModelOperationIndex) {
+ const ErrorStatus status = result.first;
+ const std::vector<bool>& supported = result.second;
+ std::vector<bool> remappedSupported(model.main.operations.size(), false);
+ for (size_t i = 0; i < supported.size(); ++i) {
+ if (supported[i]) {
+ remappedSupported[slicedModelOperationIndexToModelOperationIndex(i)] = true;
+ }
+ }
+ return std::make_pair(status, std::move(remappedSupported));
+ };
- // version 1.3+ HAL
+ // version 1.3 HAL
+ const V1_3::Model model13 = convertToV1_3(model);
if (getDevice<V1_3::IDevice>() != nullptr) {
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_3");
- Return<void> ret = recoverable<void, V1_3::IDevice>(
- __FUNCTION__, [&model, &result](const sp<V1_3::IDevice>& device) {
+ hardware::Return<void> ret = recoverable<void, V1_3::IDevice>(
+ __FUNCTION__, [&model13, &result](const sp<V1_3::IDevice>& device) {
return device->getSupportedOperations_1_3(
- model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(error, supported);
+ model13, [&result](V1_3::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1005,11 +1022,11 @@
// version 1.2 HAL
if (getDevice<V1_2::IDevice>() != nullptr) {
- const bool compliant = compliantWithV1_2(model);
+ const bool compliant = compliantWithV1_2(model13);
V1_2::Model model12;
- std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex;
+ MetaModel::Mapper slicedModelOperationIndexToModelOperationIndex;
if (compliant) {
- model12 = convertToV1_2(model);
+ model12 = convertToV1_2(model13);
} else {
const auto slice12 = metaModel.getSliceV1_2();
if (!slice12.has_value()) {
@@ -1018,12 +1035,12 @@
std::tie(model12, slicedModelOperationIndexToModelOperationIndex) = *slice12;
}
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_2");
- Return<void> ret = recoverable<void, V1_2::IDevice>(
+ hardware::Return<void> ret = recoverable<void, V1_2::IDevice>(
__FUNCTION__, [&model12, &result](const sp<V1_2::IDevice>& device) {
return device->getSupportedOperations_1_2(
- model12,
- [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(convertToV1_3(error), supported);
+ model12, [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1038,11 +1055,11 @@
// version 1.1 HAL
if (getDevice<V1_1::IDevice>() != nullptr) {
- const bool compliant = compliantWithV1_1(model);
+ const bool compliant = compliantWithV1_1(model13);
V1_1::Model model11;
- std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex;
+ MetaModel::Mapper slicedModelOperationIndexToModelOperationIndex;
if (compliant) {
- model11 = convertToV1_1(model);
+ model11 = convertToV1_1(model13);
} else {
const auto slice11 = metaModel.getSliceV1_1();
if (!slice11.has_value()) {
@@ -1051,12 +1068,12 @@
std::tie(model11, slicedModelOperationIndexToModelOperationIndex) = *slice11;
}
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_1");
- Return<void> ret = recoverable<void, V1_1::IDevice>(
+ hardware::Return<void> ret = recoverable<void, V1_1::IDevice>(
__FUNCTION__, [&model11, &result](const sp<V1_1::IDevice>& device) {
return device->getSupportedOperations_1_1(
- model11,
- [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(convertToV1_3(error), supported);
+ model11, [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1071,11 +1088,11 @@
// version 1.0 HAL
if (getDevice<V1_0::IDevice>() != nullptr) {
- const bool compliant = compliantWithV1_0(model);
+ const bool compliant = compliantWithV1_0(model13);
V1_0::Model model10;
- std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex;
+ MetaModel::Mapper slicedModelOperationIndexToModelOperationIndex;
if (compliant) {
- model10 = convertToV1_0(model);
+ model10 = convertToV1_0(model13);
} else {
const auto slice10 = metaModel.getSliceV1_0();
if (!slice10.has_value()) {
@@ -1084,12 +1101,12 @@
std::tie(model10, slicedModelOperationIndexToModelOperationIndex) = *slice10;
}
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations");
- Return<void> ret = recoverable<void, V1_0::IDevice>(
+ hardware::Return<void> ret = recoverable<void, V1_0::IDevice>(
__FUNCTION__, [&model10, &result](const sp<V1_0::IDevice>& device) {
return device->getSupportedOperations(
- model10,
- [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(convertToV1_3(error), supported);
+ model10, [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1111,7 +1128,7 @@
// handle is expected to come in as empty, and is only set to a fd when the function returns true.
// The file descriptor is always opened with both read and write permission.
static bool createCacheHandle(const std::string& cache, bool createIfNotExist,
- hidl_handle* handle) {
+ hardware::hidl_handle* handle) {
CHECK(handle->getNativeHandle() == nullptr);
int fd = open(cache.c_str(), createIfNotExist ? (O_RDWR | O_CREAT) : O_RDWR, S_IRUSR | S_IWUSR);
NN_RET_CHECK_GE(fd, 0);
@@ -1127,16 +1144,15 @@
// Opens a list of cache files and returns the handle vector. Returns empty vector on fail.
// The file descriptors are always opened with both read and write permission.
-static hidl_vec<hidl_handle> createCacheHandleVec(uint32_t numCacheFiles,
- const std::string& baseFileName,
- bool createIfNotExist) {
- CHECK(numCacheFiles <= static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
- hidl_vec<hidl_handle> handles(numCacheFiles);
+static hardware::hidl_vec<hardware::hidl_handle> createCacheHandleVec(
+ uint32_t numCacheFiles, const std::string& baseFileName, bool createIfNotExist) {
+ CHECK(numCacheFiles <= static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES));
+ hardware::hidl_vec<hardware::hidl_handle> handles(numCacheFiles);
for (uint32_t i = 0; i < numCacheFiles; i++) {
std::string filename = baseFileName + std::to_string(i);
VLOG(COMPILATION) << "Cache " << i << ": " << filename;
if (!createCacheHandle(filename, createIfNotExist, &handles[i])) {
- return hidl_vec<hidl_handle>();
+ return hardware::hidl_vec<hardware::hidl_handle>();
}
}
return handles;
@@ -1146,8 +1162,9 @@
// fail and leaves the vectors empty. Each vector is expected to come in as empty.
static bool getCacheHandles(const std::string& cacheDir, const CacheToken& token,
const std::pair<uint32_t, uint32_t>& numCacheFiles,
- bool createIfNotExist, hidl_vec<hidl_handle>* modelCache,
- hidl_vec<hidl_handle>* dataCache) {
+ bool createIfNotExist,
+ hardware::hidl_vec<hardware::hidl_handle>* modelCache,
+ hardware::hidl_vec<hardware::hidl_handle>* dataCache) {
// The filename includes ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2 characters for token,
// and 1 character for model/data cache identifier.
std::string filename(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2 + 1, '0');
@@ -1193,7 +1210,7 @@
if (status != ErrorStatus::NONE) {
LOG(ERROR) << prepareName << " on " << serviceName << " failed: "
- << "prepareReturnStatus=" << toString(status);
+ << "prepareReturnStatus=" << status;
return prepareModelFailure(status);
}
if (preparedModel == nullptr) {
@@ -1214,7 +1231,7 @@
ANEURALNETWORKS_DEAD_OBJECT, nullptr};
// Get cache files if they exist, otherwise create them.
- hidl_vec<hidl_handle> modelCache, dataCache;
+ hardware::hidl_vec<hardware::hidl_handle> modelCache, dataCache;
if (!maybeToken.has_value() ||
!getCacheHandles(cacheDir, *maybeToken, kNumberOfCacheFilesNeeded,
/*createIfNotExist=*/true, &modelCache, &dataCache)) {
@@ -1226,19 +1243,22 @@
static const CacheToken kNullToken{};
const CacheToken token = maybeToken.value_or(kNullToken);
+ const V1_3::Model model13 = convertToV1_3(model);
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
// If 1.3 device, try preparing model
if (getDevice<V1_3::IDevice>() != nullptr) {
const auto otp = makeTimePoint(deadline);
- const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>(
- __FUNCTION__,
- [&model, preference, priority, &otp, &modelCache, &dataCache, &token,
- &callback](const sp<V1_3::IDevice>& device) {
- return device->prepareModel_1_3(model, preference, priority, otp, modelCache,
- dataCache, token, callback);
- },
- callback);
+ const hardware::Return<V1_3::ErrorStatus> ret =
+ recoverable<V1_3::ErrorStatus, V1_3::IDevice>(
+ __FUNCTION__,
+ [&model13, preference, priority, &otp, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_3::IDevice>& device) {
+ return device->prepareModel_1_3(
+ model13, convertToV1_1(preference), convertToV1_3(priority),
+ convertToV1_3(otp), modelCache, dataCache, token, callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description();
return kDeadObject;
@@ -1247,9 +1267,10 @@
LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description();
return prepareModelFailure();
}
- if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_3 returned " << toString(static_cast<ErrorStatus>(ret));
- return prepareModelFailure(ret);
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_3 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel_1_3", kServiceName);
}
@@ -1264,20 +1285,22 @@
// but could be larger for other models).
NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
"VersionedIDevice::prepareModel_1_2");
- compliant = compliantWithV1_2(model);
+ compliant = compliantWithV1_2(model13);
if (compliant) {
- model12 = convertToV1_2(model); // copy is elided
+ model12 = convertToV1_2(model13); // copy is elided
}
}
if (compliant) {
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
- __FUNCTION__,
- [&model12, &preference, &modelCache, &dataCache, &token,
- &callback](const sp<V1_2::IDevice>& device) {
- return device->prepareModel_1_2(model12, preference, modelCache, dataCache,
- token, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
+ __FUNCTION__,
+ [&model12, &preference, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_2::IDevice>& device) {
+ return device->prepareModel_1_2(model12, convertToV1_1(preference),
+ modelCache, dataCache, token,
+ callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
return kDeadObject;
@@ -1286,10 +1309,10 @@
LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_2 returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_2 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel_1_2", kServiceName);
}
@@ -1308,18 +1331,20 @@
// but could be larger for other models).
NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
"VersionedIDevice::prepareModel_1_1");
- compliant = compliantWithV1_1(model);
+ compliant = compliantWithV1_1(model13);
if (compliant) {
- model11 = convertToV1_1(model); // copy is elided
+ model11 = convertToV1_1(model13); // copy is elided
}
}
if (compliant) {
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_1::IDevice>(
- __FUNCTION__,
- [&model11, &preference, &callback](const sp<V1_1::IDevice>& device) {
- return device->prepareModel_1_1(model11, preference, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_1::IDevice>(
+ __FUNCTION__,
+ [&model11, &preference, &callback](const sp<V1_1::IDevice>& device) {
+ return device->prepareModel_1_1(model11, convertToV1_1(preference),
+ callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description();
return kDeadObject;
@@ -1328,10 +1353,10 @@
LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_1 returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_1 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel_1_1", kServiceName);
}
@@ -1350,18 +1375,19 @@
// but could be larger for other models).
NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
"VersionedIDevice::prepareModel");
- compliant = compliantWithV1_0(model);
+ compliant = compliantWithV1_0(model13);
if (compliant) {
- model10 = convertToV1_0(model); // copy is elided
+ model10 = convertToV1_0(model13); // copy is elided
}
}
if (compliant) {
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_0::IDevice>(
- __FUNCTION__,
- [&model10, &callback](const sp<V1_0::IDevice>& device) {
- return device->prepareModel(model10, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_0::IDevice>(
+ __FUNCTION__,
+ [&model10, &callback](const sp<V1_0::IDevice>& device) {
+ return device->prepareModel(model10, callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel failure: " << ret.description();
return kDeadObject;
@@ -1370,10 +1396,10 @@
LOG(ERROR) << "prepareModel failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel", kServiceName);
}
@@ -1398,24 +1424,25 @@
ANEURALNETWORKS_DEAD_OBJECT, nullptr};
// Get cache files if they exist, otherwise return from the function early.
- hidl_vec<hidl_handle> modelCache, dataCache;
+ hardware::hidl_vec<hardware::hidl_handle> modelCache, dataCache;
if (!getCacheHandles(cacheDir, token, kNumberOfCacheFilesNeeded,
/*createIfNotExist=*/false, &modelCache, &dataCache)) {
return prepareModelFailure();
}
- // version 1.3+ HAL
+ // version 1.3 HAL
if (getDevice<V1_3::IDevice>() != nullptr) {
const auto otp = makeTimePoint(deadline);
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
- const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>(
- __FUNCTION__,
- [&otp, &modelCache, &dataCache, &token,
- &callback](const sp<V1_3::IDevice>& device) {
- return device->prepareModelFromCache_1_3(otp, modelCache, dataCache, token,
- callback);
- },
- callback);
+ const hardware::Return<V1_3::ErrorStatus> ret =
+ recoverable<V1_3::ErrorStatus, V1_3::IDevice>(
+ __FUNCTION__,
+ [&otp, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_3::IDevice>& device) {
+ return device->prepareModelFromCache_1_3(convertToV1_3(otp), modelCache,
+ dataCache, token, callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModelFromCache_1_3 failure: " << ret.description();
return kDeadObject;
@@ -1424,10 +1451,10 @@
LOG(ERROR) << "prepareModelFromCache_1_3 failure: " << ret.description();
return prepareModelFailure();
}
- if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModelFromCache_1_3 returned "
- << toString(static_cast<ErrorStatus>(ret));
- return prepareModelFailure(ret);
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModelFromCache_1_3 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModelFromCache_1_3", kServiceName);
}
@@ -1435,12 +1462,15 @@
// version 1.2 HAL
if (getDevice<V1_2::IDevice>() != nullptr) {
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
- __FUNCTION__,
- [&modelCache, &dataCache, &token, &callback](const sp<V1_2::IDevice>& device) {
- return device->prepareModelFromCache(modelCache, dataCache, token, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
+ __FUNCTION__,
+ [&modelCache, &dataCache, &token,
+ &callback](const sp<V1_2::IDevice>& device) {
+ return device->prepareModelFromCache(modelCache, dataCache, token,
+ callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModelFromCache failure: " << ret.description();
return kDeadObject;
@@ -1449,10 +1479,10 @@
LOG(ERROR) << "prepareModelFromCache failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModelFromCache returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModelFromCache returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModelFromCache", kServiceName);
}
@@ -1520,28 +1550,31 @@
return kServiceName;
}
-std::tuple<ErrorStatus, sp<IBuffer>, uint32_t> VersionedIDevice::allocate(
- const BufferDesc& desc,
+std::tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, uint32_t> VersionedIDevice::allocate(
+ const V1_3::BufferDesc& desc,
const std::vector<std::shared_ptr<VersionedIPreparedModel>>& versionedPreparedModels,
- const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles) const {
- const auto kFailure = std::make_tuple<ErrorStatus, sp<IBuffer>, uint32_t>(
- ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles) const {
+ const auto kFailure = std::make_tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, uint32_t>(
+ V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- // version 1.3+ HAL
+ // version 1.3 HAL
if (getDevice<V1_3::IDevice>() != nullptr) {
- hidl_vec<sp<V1_3::IPreparedModel>> preparedModels(versionedPreparedModels.size());
+ hardware::hidl_vec<sp<V1_3::IPreparedModel>> preparedModels(versionedPreparedModels.size());
std::transform(versionedPreparedModels.begin(), versionedPreparedModels.end(),
preparedModels.begin(),
[](const auto& preparedModel) { return preparedModel->getV1_3(); });
- std::tuple<ErrorStatus, sp<IBuffer>, int32_t> result;
- const Return<void> ret = recoverable<void, V1_3::IDevice>(
+ std::tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, int32_t> result;
+ const hardware::Return<void> ret = recoverable<void, V1_3::IDevice>(
__FUNCTION__, [&](const sp<V1_3::IDevice>& device) {
- return device->allocate(desc, preparedModels, inputRoles, outputRoles,
- [&result](ErrorStatus error, const sp<IBuffer>& buffer,
- uint32_t token) {
- result = {error, buffer, token};
- });
+ return device->allocate(
+ desc, preparedModels, convertToV1_3(inputRoles),
+ convertToV1_3(outputRoles),
+ [&result](V1_3::ErrorStatus error, const sp<V1_3::IBuffer>& buffer,
+ uint32_t token) {
+ result = {error, buffer, token};
+ });
});
if (!ret.isOk()) {
LOG(ERROR) << "allocate failure: " << ret.description();
diff --git a/runtime/VersionedInterfaces.h b/runtime/VersionedInterfaces.h
index 1b8433e..d41dcd3 100644
--- a/runtime/VersionedInterfaces.h
+++ b/runtime/VersionedInterfaces.h
@@ -43,6 +43,8 @@
class MetaModel;
class VersionedIPreparedModel;
+using ModelFactory = std::function<Model()>;
+
/**
* Each class (VersionedIDevice, VersionedIPreparedModel) wraps a HIDL interface
* of any version to abstract away version differences. It allows the remainder
@@ -77,7 +79,7 @@
* @return A valid VersionedIDevice object, otherwise nullptr.
*/
static std::shared_ptr<VersionedIDevice> create(std::string serviceName,
- const hal::DeviceFactory& makeDevice);
+ const HalDeviceFactory& makeDevice);
/**
* Constructor for the VersionedIDevice object.
@@ -98,18 +100,17 @@
* newer interfaces, and a hidl_death_recipient that will proactively handle
* the case when the service containing the IDevice object crashes.
*/
- VersionedIDevice(hal::Capabilities capabilities,
- std::vector<hal::Extension> supportedExtensions, int32_t type,
- std::string versionString,
+ VersionedIDevice(Capabilities capabilities, std::vector<Extension> supportedExtensions,
+ int32_t type, std::string versionString,
std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
- std::string serviceName, const hal::DeviceFactory& makeDevice, Core core);
+ std::string serviceName, const HalDeviceFactory& makeDevice, Core core);
/**
* Gets the capabilities of a driver.
*
* @return capabilities Capabilities of the driver.
*/
- const hal::Capabilities& getCapabilities() const;
+ const Capabilities& getCapabilities() const;
/**
* Gets information about extensions supported by the driver implementation.
@@ -122,7 +123,7 @@
*
* @return extensions A list of supported extensions.
*/
- const std::vector<hal::Extension>& getSupportedExtensions() const;
+ const std::vector<Extension>& getSupportedExtensions() const;
/**
* Gets the supported operations in a MetaModel.
@@ -152,7 +153,7 @@
* corresponds with the index of the operation
* it is describing.
*/
- std::pair<hal::ErrorStatus, hal::hidl_vec<bool>> getSupportedOperations(
+ std::pair<ErrorStatus, std::vector<bool>> getSupportedOperations(
const MetaModel& metaModel) const;
/**
@@ -220,9 +221,9 @@
* that has been prepared for execution, else nullptr.
*/
std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModel(
- const hal::ModelFactory& makeModel, hal::ExecutionPreference preference, hal::Priority,
+ const ModelFactory& makeModel, ExecutionPreference preference, Priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
- const std::optional<hal::CacheToken>& maybeToken) const;
+ const std::optional<CacheToken>& maybeToken) const;
/**
* Returns the feature level of a driver.
@@ -366,11 +367,11 @@
* execution. If the buffer was unable to be allocated due to an error, the token must be
* 0.
*/
- std::tuple<hal::ErrorStatus, sp<hal::IBuffer>, uint32_t> allocate(
- const hal::BufferDesc& desc,
+ std::tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, uint32_t> allocate(
+ const V1_3::BufferDesc& desc,
const std::vector<std::shared_ptr<VersionedIPreparedModel>>& preparedModels,
- const hal::hidl_vec<hal::BufferRole>& inputRoles,
- const hal::hidl_vec<hal::BufferRole>& outputRoles) const;
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles) const;
/**
* Blocks until the device is not in a bad state.
@@ -382,20 +383,20 @@
private:
// Cached initialization results.
- const hal::Capabilities kCapabilities;
- const std::vector<hal::Extension> kSupportedExtensions;
+ const Capabilities kCapabilities;
+ const std::vector<Extension> kSupportedExtensions;
const int32_t kType;
const std::string kVersionString;
const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
// internal methods to prepare a model
std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelInternal(
- const hal::Model& model, hal::ExecutionPreference preference, hal::Priority priority,
+ const Model& model, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
- const std::optional<hal::CacheToken>& maybeToken) const;
+ const std::optional<CacheToken>& maybeToken) const;
std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelFromCacheInternal(
const std::optional<Deadline>& deadline, const std::string& cacheDir,
- const hal::CacheToken& token) const;
+ const CacheToken& token) const;
/**
* This is a utility class for VersionedIDevice that encapsulates a
@@ -426,7 +427,7 @@
* the case when the service containing the IDevice
* object crashes.
*/
- Core(sp<hal::V1_0::IDevice> device, sp<IDeviceDeathHandler> deathHandler);
+ Core(sp<V1_0::IDevice> device, sp<IDeviceDeathHandler> deathHandler);
/**
* Destructor for the Core object.
@@ -456,7 +457,7 @@
* interface.
* @return A valid Core object, otherwise nullopt.
*/
- static std::optional<Core> create(sp<hal::V1_0::IDevice> device);
+ static std::optional<Core> create(sp<V1_0::IDevice> device);
/**
* Returns sp<*::IDevice> that is a downcast of the sp<V1_0::IDevice>
@@ -466,19 +467,19 @@
template <typename T_IDevice>
sp<T_IDevice> getDevice() const;
template <>
- sp<hal::V1_0::IDevice> getDevice() const {
+ sp<V1_0::IDevice> getDevice() const {
return mDeviceV1_0;
}
template <>
- sp<hal::V1_1::IDevice> getDevice() const {
+ sp<V1_1::IDevice> getDevice() const {
return mDeviceV1_1;
}
template <>
- sp<hal::V1_2::IDevice> getDevice() const {
+ sp<V1_2::IDevice> getDevice() const {
return mDeviceV1_2;
}
template <>
- sp<hal::V1_3::IDevice> getDevice() const {
+ sp<V1_3::IDevice> getDevice() const {
return mDeviceV1_3;
}
@@ -511,10 +512,10 @@
* Idiomatic usage: if mDeviceV1_1 is non-null, do V1_1 dispatch; otherwise,
* do V1_0 dispatch.
*/
- sp<hal::V1_0::IDevice> mDeviceV1_0;
- sp<hal::V1_1::IDevice> mDeviceV1_1;
- sp<hal::V1_2::IDevice> mDeviceV1_2;
- sp<hal::V1_3::IDevice> mDeviceV1_3;
+ sp<V1_0::IDevice> mDeviceV1_0;
+ sp<V1_1::IDevice> mDeviceV1_1;
+ sp<V1_2::IDevice> mDeviceV1_2;
+ sp<V1_3::IDevice> mDeviceV1_3;
/**
* HIDL callback to be invoked if the service for mDeviceV1_0 crashes.
@@ -548,16 +549,16 @@
// If a callback is provided, this method protects it against driver death
// and waits for it (callback->wait()).
template <typename T_Return, typename T_IDevice, typename T_Callback = std::nullptr_t>
- hal::Return<T_Return> recoverable(
+ hardware::Return<T_Return> recoverable(
const char* context,
- const std::function<hal::Return<T_Return>(const sp<T_IDevice>&)>& fn,
+ const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
const T_Callback& callback = nullptr) const EXCLUDES(mMutex);
// The name of the service that implements the driver.
const std::string kServiceName;
// Factory function object to generate an IDevice object.
- const hal::DeviceFactory kMakeDevice;
+ const HalDeviceFactory kMakeDevice;
// Guards access to mCore.
mutable std::shared_mutex mMutex;
@@ -591,7 +592,7 @@
* the case when the service containing the IDevice
* object crashes.
*/
- VersionedIPreparedModel(sp<hal::V1_0::IPreparedModel> preparedModel,
+ VersionedIPreparedModel(sp<V1_0::IPreparedModel> preparedModel,
sp<IPreparedModelDeathHandler> deathHandler);
/**
@@ -676,10 +677,9 @@
* UINT64_MAX. A driver may choose to report any time as UINT64_MAX,
* indicating that measurement is not available.
*/
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> execute(
- const hal::Request& request, hal::MeasureTiming measure,
- const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const;
+ std::tuple<int, std::vector<OutputShape>, Timing> execute(
+ const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const;
/**
* Creates a burst controller on a prepared model.
@@ -763,30 +763,28 @@
* sync execution. Either IFencedExecutionCallback will be
* returned or optional timing information is returned
*/
- std::tuple<int, hal::hidl_handle, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
- const hal::Request& request, const hal::hidl_vec<hal::hidl_handle>& waitFor,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence);
+ std::tuple<int, hardware::hidl_handle, sp<V1_3::IFencedExecutionCallback>, Timing>
+ executeFenced(const Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence);
private:
friend class VersionedIDevice;
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> executeAsynchronously(
- const hal::Request& request, hal::MeasureTiming timing,
- const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration) const;
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> executeSynchronously(
- const hal::Request& request, hal::MeasureTiming measure,
- const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration) const;
+ std::tuple<int, std::vector<OutputShape>, Timing> executeAsynchronously(
+ const Request& request, MeasureTiming timing, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration) const;
+ std::tuple<int, std::vector<OutputShape>, Timing> executeSynchronously(
+ const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration) const;
/**
* Returns sp<V1_3::IPreparedModel> that is a downcast of the sp<V1_0::IPreparedModel>
* passed to the constructor. This will be nullptr if that IPreparedModel is
* not actually of the specified downcast type.
*/
- sp<hal::V1_3::IPreparedModel> getV1_3() const { return mPreparedModelV1_3; }
+ sp<V1_3::IPreparedModel> getV1_3() const { return mPreparedModelV1_3; }
/**
* All versions of IPreparedModel are necessary because the preparedModel could be v1.0,
@@ -810,9 +808,9 @@
* otherwise, if mPreparedModelV1_2 is non-null, do V1_2 dispatch;
* otherwise, do V1_0 dispatch.
*/
- sp<hal::V1_0::IPreparedModel> mPreparedModelV1_0;
- sp<hal::V1_2::IPreparedModel> mPreparedModelV1_2;
- sp<hal::V1_3::IPreparedModel> mPreparedModelV1_3;
+ sp<V1_0::IPreparedModel> mPreparedModelV1_0;
+ sp<V1_2::IPreparedModel> mPreparedModelV1_2;
+ sp<V1_3::IPreparedModel> mPreparedModelV1_3;
/**
* HIDL callback to be invoked if the service for mPreparedModelV1_0 crashes.
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp
index 4ea388a..ac53b9a 100644
--- a/runtime/test/Android.bp
+++ b/runtime/test/Android.bp
@@ -134,8 +134,6 @@
"fibonacci_extension/FibonacciExtensionTest.cpp",
"TestMain.cpp",
-
- "Bridge.cpp",
],
static_libs: [
"[email protected]",
diff --git a/runtime/test/Bridge.cpp b/runtime/test/Bridge.cpp
deleted file mode 100644
index 5740256..0000000
--- a/runtime/test/Bridge.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// There are name clashes between NeuralNetworksWrapper.h and
-// HalInterfaces.h. Many tests include the former; many internal
-// header files (nn/runtime/*.h) include the latter. This file
-// contains a few utilities for tests to call that trampoline to the
-// internal headers.
-
-#include "GraphDump.h"
-#include "ModelBuilder.h"
-
-namespace android {
-namespace nn {
-namespace bridge_tests {
-
-void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream) {
- ::android::nn::graphDump(name, model->makeHidlModel(), outStream);
-}
-
-} // namespace bridge_tests
-} // namespace nn
-} // namespace android
diff --git a/runtime/test/Bridge.h b/runtime/test/Bridge.h
deleted file mode 100644
index f067df0..0000000
--- a/runtime/test/Bridge.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// There are name clashes between NeuralNetworksWrapper.h and
-// HalInterfaces.h. Many tests include the former; many internal
-// header files (nn/runtime/*.h) include the latter. This file
-// contains a few utilities for tests to call that trampoline to the
-// internal headers.
-
-#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
-#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
-
-#include <iostream>
-
-namespace android {
-namespace nn {
-
-class ModelBuilder;
-
-namespace bridge_tests {
-
-void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream = &std::cout);
-
-} // namespace bridge_tests
-
-} // namespace nn
-} // namespace android
-
-#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
diff --git a/runtime/test/TestCompilationCaching.cpp b/runtime/test/TestCompilationCaching.cpp
index 2311685..1a1cdc6 100644
--- a/runtime/test/TestCompilationCaching.cpp
+++ b/runtime/test/TestCompilationCaching.cpp
@@ -31,16 +31,17 @@
#include "TestNeuralNetworksWrapper.h"
using namespace android::nn;
-using namespace hal;
-using Result = test_wrapper::Result;
+namespace hardware = android::hardware;
+using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
-const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
template <typename T>
using MQDescriptorSync = ::android::hardware::MQDescriptorSync<T>;
+using android::sp;
namespace android::hardware::neuralnetworks::V1_0 {
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+::std::ostream& operator<<(::std::ostream& os, V1_3::ErrorStatus errorStatus) {
return os << toString(errorStatus);
}
@@ -66,10 +67,10 @@
}
// Whether the driver is expected to be registered because it can pass initialization.
-bool canDeviceBeRegistered(ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
+bool canDeviceBeRegistered(V1_3::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
constexpr uint32_t maxNumCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
- return error == ErrorStatus::NONE && numModelCache <= maxNumCacheFiles &&
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES);
+ return error == V1_3::ErrorStatus::NONE && numModelCache <= maxNumCacheFiles &&
numDataCache <= maxNumCacheFiles;
}
@@ -94,55 +95,59 @@
private:
static constexpr size_t kCacheSize = 256;
- class CachingPreparedModel : public IPreparedModel {
+ class CachingPreparedModel : public V1_3::IPreparedModel {
public:
CachingPreparedModel() = default;
- Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
- const sp<V1_0::IExecutionCallback>&) override {
+ hardware::Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+ const sp<V1_0::IExecutionCallback>&) override {
return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
- const sp<V1_2::IExecutionCallback>&) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request&, V1_2::MeasureTiming,
+ const sp<V1_2::IExecutionCallback>&) override {
return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
- const OptionalTimePoint&,
- const OptionalTimeoutDuration&,
- const sp<V1_3::IExecutionCallback>&) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request&, V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const sp<V1_3::IExecutionCallback>&) override {
return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming,
+ executeSynchronously_cb cb) override {
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
- Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming,
- const OptionalTimePoint&,
- const OptionalTimeoutDuration&,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeSynchronously_1_3_cb cb) override {
cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
- Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
- const MQDescriptorSync<V1_2::FmqRequestDatum>&,
- const MQDescriptorSync<V1_2::FmqResultDatum>&,
- configureExecutionBurst_cb cb) override {
+ hardware::Return<void> configureExecutionBurst(
+ const sp<V1_2::IBurstCallback>&, const MQDescriptorSync<V1_2::FmqRequestDatum>&,
+ const MQDescriptorSync<V1_2::FmqResultDatum>&,
+ configureExecutionBurst_cb cb) override {
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
- return Void();
+ return hardware::Void();
}
- Return<void> executeFenced(const hal::Request&, const hidl_vec<hidl_handle>&, MeasureTiming,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const OptionalTimeoutDuration&, executeFenced_cb cb) {
- cb(ErrorStatus::DEVICE_UNAVAILABLE, hidl_handle(nullptr), nullptr);
- return Void();
+ hardware::Return<void> executeFenced(const V1_3::Request&,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeFenced_cb cb) {
+ cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
};
public:
- CachingDriver(std::string_view name, ErrorStatus errorStatusGetNumCacheFiles,
+ CachingDriver(std::string_view name, V1_3::ErrorStatus errorStatusGetNumCacheFiles,
uint32_t numModelCache, uint32_t numDataCache,
- ErrorStatus errorStatusPrepareFromCache)
+ V1_3::ErrorStatus errorStatusPrepareFromCache)
: SampleDriver(name.data()),
mErrorStatusGetNumCacheFiles(errorStatusGetNumCacheFiles),
mNumModelCache(numModelCache),
@@ -156,39 +161,40 @@
~CachingDriver() override {}
// Reports faster than cpu.
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
// Reports supporting all operations.
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
// Reports according to mGetNumCacheFiles.
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
cb(convertToV1_0(mErrorStatusGetNumCacheFiles), mNumModelCache, mNumDataCache);
- return Void();
+ return hardware::Void();
}
// Generates CachingPreparedModel.
// Writes the cache entry per mCacheXData and sets mHasCalledPrepareModel.
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const Model&, ExecutionPreference, Priority, const OptionalTimePoint&,
- const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model&, V1_1::ExecutionPreference, V1_3::Priority,
+ const V1_3::OptionalTimePoint&,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& cb) override {
checkNumberOfCacheHandles(modelCacheHandle.size(), dataCacheHandle.size());
if (modelCacheHandle.size() != 0 || dataCacheHandle.size() != 0) {
@@ -204,9 +210,10 @@
// Checks if the cache entry is correct, notifies error status according to
// mErrorStatusPrepareFromCache, sets mHasCalledPrepareModelFromCache.
- Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
- const OptionalTimePoint&, const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
+ const V1_3::OptionalTimePoint&,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) override {
readFromCache(modelCacheHandle, mModelCacheData);
readFromCache(dataCacheHandle, mDataCacheData);
@@ -236,7 +243,8 @@
}
}
- void writeToCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& cache) {
+ void writeToCache(const hardware::hidl_vec<hardware::hidl_handle>& handles,
+ const std::vector<uint8_t>& cache) {
for (uint32_t i = 0; i < handles.size(); ++i) {
ASSERT_EQ(handles[i]->numFds, 1);
EXPECT_EQ(write(handles[i]->data[0], cache.data(), kCacheSize),
@@ -244,7 +252,8 @@
}
}
- void readFromCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& expected) {
+ void readFromCache(const hardware::hidl_vec<hardware::hidl_handle>& handles,
+ const std::vector<uint8_t>& expected) {
for (uint32_t i = 0; i < handles.size(); ++i) {
ASSERT_EQ(handles[i]->numFds, 1);
std::vector<uint8_t> actual(kCacheSize);
@@ -257,10 +266,10 @@
std::vector<uint8_t> mModelCacheData;
std::vector<uint8_t> mDataCacheData;
- const ErrorStatus mErrorStatusGetNumCacheFiles;
+ const V1_3::ErrorStatus mErrorStatusGetNumCacheFiles;
const uint32_t mNumModelCache;
const uint32_t mNumDataCache;
- const ErrorStatus mErrorStatusPrepareFromCache;
+ const V1_3::ErrorStatus mErrorStatusPrepareFromCache;
bool mHasCalledPrepareModelFromCache = false;
HasCalledPrepareModel mHasCalledPrepareModel = HasCalledPrepareModel::NO;
@@ -279,7 +288,7 @@
model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c});
model->identifyInputsAndOutputs({a, b}, {c});
ASSERT_TRUE(model->isValid());
- ASSERT_EQ(model->finish(), Result::NO_ERROR);
+ ASSERT_EQ(model->finish(), WrapperResult::NO_ERROR);
}
void getDeviceWithName(std::string_view deviceName, const ANeuralNetworksDevice** outputDevice) {
@@ -307,17 +316,17 @@
// - ErrorStatus returning from getNumberOfCacheFilesNeeded
// - Number of model cache files returning from getNumberOfCacheFilesNeeded
// - Number of data cache files returning from getNumberOfCacheFilesNeeded
-using DeviceRegistrationTestParam = std::tuple<ErrorStatus, uint32_t, uint32_t>;
+using DeviceRegistrationTestParam = std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t>;
class DeviceRegistrationTest : public ::testing::TestWithParam<DeviceRegistrationTestParam> {
protected:
static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching";
- const ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam());
+ const V1_3::ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam());
const uint32_t kNumModelCache = std::get<1>(GetParam());
const uint32_t kNumDataCache = std::get<2>(GetParam());
const sp<CachingDriver> kDriver =
new CachingDriver(kDeviceName, kErrorStatusGetNumCacheFiles, kNumModelCache,
- kNumDataCache, ErrorStatus::NONE);
+ kNumDataCache, V1_3::ErrorStatus::NONE);
};
TEST_P(DeviceRegistrationTest, CachingFailure) {
@@ -344,7 +353,7 @@
// - Number of model cache files returning from getNumberOfCacheFilesNeeded
// - Number of data cache files returning from getNumberOfCacheFilesNeeded
// - ErrorStatus returning from prepareModelFromCache_1_3
-using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, ErrorStatus>;
+using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, V1_3::ErrorStatus>;
class CompilationCachingTest : public ::testing::TestWithParam<CompilationCachingTestParam> {
protected:
@@ -390,27 +399,29 @@
}
void createCache() {
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, ErrorStatus::NONE);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache,
+ kNumDataCache, V1_3::ErrorStatus::NONE);
compileModel(driver, /*withToken=*/true);
}
static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching";
const uint32_t kNumModelCache = std::get<0>(GetParam());
const uint32_t kNumDataCache = std::get<1>(GetParam());
- const ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam());
+ const V1_3::ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam());
const bool kIsCachingSupported = isCachingSupported(kNumModelCache, kNumDataCache);
test_wrapper::Model mModel;
std::string mCacheDir;
- const CacheToken kToken{};
+ const HalCacheToken kToken{};
};
TEST_P(CompilationCachingTest, TokenProvidedAndCacheNotExist) {
if (DeviceManager::get()->getUseCpuOnly()) {
return;
}
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/true);
// When cache file does not exist, the runtime should never call prepareModelFromCache_1_3.
@@ -427,8 +438,9 @@
return;
}
createCache();
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/true);
// When cache files exist, the runtime should call prepareModelFromCache_1_3 iff caching
@@ -437,7 +449,7 @@
HasCalledPrepareModel expectHasCalledPrepareModel;
if (kIsCachingSupported) {
- if (kErrorStatusPrepareFromCache == ErrorStatus::NONE) {
+ if (kErrorStatusPrepareFromCache == V1_3::ErrorStatus::NONE) {
// The runtime should not call prepareModel_1_3 iff caching supported and
// prepareModelFromCache_1_3 succeeds.
expectHasCalledPrepareModel = HasCalledPrepareModel::NO;
@@ -457,8 +469,9 @@
if (DeviceManager::get()->getUseCpuOnly()) {
return;
}
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/false);
// When no NDK token is provided by the client, the runtime should never call
@@ -468,15 +481,15 @@
}
static const auto kErrorStatusGetNumCacheFilesChoices =
- testing::Values(ErrorStatus::NONE, ErrorStatus::DEVICE_UNAVAILABLE);
+ testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::DEVICE_UNAVAILABLE);
static const auto kNumCacheChoices =
- testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES),
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES) + 1);
+ testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES),
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES) + 1);
static const auto kNumValidCacheChoices =
- testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES));
static const auto kErrorStatusPrepareFromCacheChoices =
- testing::Values(ErrorStatus::NONE, ErrorStatus::GENERAL_FAILURE,
- ErrorStatus::DEVICE_UNAVAILABLE, ErrorStatus::INVALID_ARGUMENT);
+ testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE,
+ V1_3::ErrorStatus::DEVICE_UNAVAILABLE, V1_3::ErrorStatus::INVALID_ARGUMENT);
INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, DeviceRegistrationTest,
testing::Combine(kErrorStatusGetNumCacheFilesChoices, kNumCacheChoices,
diff --git a/runtime/test/TestCompliance.cpp b/runtime/test/TestCompliance.cpp
index d756c24..299eebc 100644
--- a/runtime/test/TestCompliance.cpp
+++ b/runtime/test/TestCompliance.cpp
@@ -27,7 +27,6 @@
namespace android::nn::compliance_test {
-using namespace hal;
using namespace test_helper;
using HidlModel = V1_3::Model;
using WrapperModel = test_wrapper::Model;
@@ -42,7 +41,7 @@
auto modelBuilder = reinterpret_cast<const ModelBuilder*>(wrapperModel.getHandle());
EXPECT_TRUE(modelBuilder->isFinished());
EXPECT_TRUE(modelBuilder->isValid());
- return modelBuilder->makeHidlModel();
+ return convertToV1_3(modelBuilder->makeModel());
}
static void testAvailableSinceV1_3(const WrapperModel& wrapperModel) {
@@ -73,12 +72,12 @@
ASSERT_TRUE(compliantWithV1_0(hidlModel));
}
-static void testAvailableSinceV1_2(const Request& request) {
+static void testAvailableSinceV1_2(const V1_3::Request& request) {
ASSERT_FALSE(compliantWithV1_0(request));
ASSERT_TRUE(compliantWithV1_2(request));
}
-static void testAvailableSinceV1_3(const Request& request) {
+static void testAvailableSinceV1_3(const V1_3::Request& request) {
ASSERT_FALSE(compliantWithV1_0(request));
ASSERT_FALSE(compliantWithV1_2(request));
}
@@ -172,20 +171,20 @@
TEST_F(ComplianceTest, HardwareBufferRequest) {
const auto [n, ahwb] = MemoryRuntimeAHWB::create(1024);
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
+ V1_3::Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
ASSERT_TRUE(ahwbMemoryPool.hidlMemory().valid());
// AHardwareBuffer as input.
- testAvailableSinceV1_2(Request{
+ testAvailableSinceV1_2(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {ahwbMemoryPool, sharedMemoryPool},
});
// AHardwareBuffer as output.
- testAvailableSinceV1_2(Request{
+ testAvailableSinceV1_2(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {sharedMemoryPool, ahwbMemoryPool},
@@ -194,20 +193,20 @@
#endif
TEST_F(ComplianceTest, DeviceMemory) {
- Request::MemoryPool sharedMemoryPool, deviceMemoryPool;
+ V1_3::Request::MemoryPool sharedMemoryPool, deviceMemoryPool;
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
deviceMemoryPool.token(1);
// Device memory as input.
- testAvailableSinceV1_3(Request{
+ testAvailableSinceV1_3(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {deviceMemoryPool, sharedMemoryPool},
});
// Device memory as output.
- testAvailableSinceV1_3(Request{
+ testAvailableSinceV1_3(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {sharedMemoryPool, deviceMemoryPool},
diff --git a/runtime/test/TestExecution.cpp b/runtime/test/TestExecution.cpp
index 3441f9f..5f012c3 100644
--- a/runtime/test/TestExecution.cpp
+++ b/runtime/test/TestExecution.cpp
@@ -38,49 +38,54 @@
namespace android {
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
using HidlModel = V1_3::Model;
using PreparedModelCallback = nn::PreparedModelCallback;
-using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
using WrapperCompilation = nn::test_wrapper::Compilation;
using WrapperEvent = nn::test_wrapper::Event;
using WrapperExecution = nn::test_wrapper::Execution;
using WrapperModel = nn::test_wrapper::Model;
using WrapperOperandType = nn::test_wrapper::OperandType;
+using WrapperResult = nn::test_wrapper::Result;
using WrapperType = nn::test_wrapper::Type;
using nn::convertToV1_0;
+using nn::convertToV1_3;
+using nn::ErrorStatus;
template <typename T>
using MQDescriptorSync = hardware::MQDescriptorSync<T>;
namespace {
-const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
// Wraps the latest version of IPreparedModel to allow dummying up the execution status,
// and control when the execution finishes.
-class TestPreparedModelLatest : public IPreparedModel {
+class TestPreparedModelLatest : public V1_3::IPreparedModel {
public:
// If errorStatus is NONE, then execute behaves normally (and sends back
// the actual execution status). Otherwise, don't bother to execute, and
// just send back errorStatus (as the execution status, not the launch
// status).
- TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mPreparedModelV1_0(preparedModel),
mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
mErrorStatus(errorStatus) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_0 != nullptr) << "V1_0 prepared model is nullptr.";
std::thread([this, request, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_0->execute(request, callback);
} else {
@@ -90,16 +95,17 @@
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
std::thread([this, request, measure, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_2->execute_1_2(request, measure, callback);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
callback->notify_1_2(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
} else {
callback->notify_1_2(convertToV1_0(mErrorStatus), {}, kBadTiming);
@@ -108,19 +114,20 @@
return V1_0::ErrorStatus::NONE;
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<V1_3::IExecutionCallback>& callback) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
std::thread([this, request, measure, deadline, loopTimeoutDuration, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_3->execute_1_3(request, measure, deadline,
loopTimeoutDuration, callback);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
callback->notify_1_3(mErrorStatus, {shape}, kBadTiming);
} else {
callback->notify_1_3(mErrorStatus, {}, kBadTiming);
@@ -129,53 +136,55 @@
return V1_3::ErrorStatus::NONE;
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_2->executeSynchronously(request, measure, cb);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
cb(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
- return Void();
+ return hardware::Void();
} else {
cb(convertToV1_0(mErrorStatus), {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> executeSynchronously_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ executeSynchronously_1_3_cb cb) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_3->executeSynchronously_1_3(request, measure, deadline,
loopTimeoutDuration, cb);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
cb(mErrorStatus, {shape}, kBadTiming);
- return Void();
+ return hardware::Void();
} else {
cb(mErrorStatus, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_2->configureExecutionBurst(callback, requestChannel,
resultChannel, cb);
} else {
cb(convertToV1_0(mErrorStatus), nullptr);
- return Void();
+ return hardware::Void();
}
}
@@ -184,25 +193,27 @@
// SampleDriver is written with that in mind. Therefore, this
// implementation is synchronous also. If the SampleDriver is updated to
// return real sync fence, this must be updated.
- Return<void> executeFenced(const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration,
- executeFenced_cb cb) override {
+ hardware::Return<void> executeFenced(const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration,
+ executeFenced_cb cb) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
- CHECK(mErrorStatus != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE)
+ CHECK(mErrorStatus != V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE)
<< "executeFenced does not support dynamic output shape";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_3->executeFenced(request, waitFor, measure, deadline,
loopTimeoutDuration, duration, cb);
} else {
// Due to the limitations of the SampleDriver, all failures look
// like launch failures. If the SampleDriver is updated to return
// real sync fences, this must be updated.
- cb(mErrorStatus, hidl_handle(nullptr), nullptr);
+ cb(mErrorStatus, hardware::hidl_handle(nullptr), nullptr);
}
- return Void();
+ return hardware::Void();
}
// We can place the TestPreparedModelLatest system in a "pause" mode where
@@ -225,7 +236,7 @@
const sp<V1_0::IPreparedModel> mPreparedModelV1_0;
const sp<V1_2::IPreparedModel> mPreparedModelV1_2;
const sp<V1_3::IPreparedModel> mPreparedModelV1_3;
- ErrorStatus mErrorStatus;
+ V1_3::ErrorStatus mErrorStatus;
static std::atomic<bool> mPauseExecutions;
static std::atomic<unsigned int> mExecutionsInFlight;
@@ -245,25 +256,27 @@
// Like TestPreparedModelLatest, but implementing 1.2
class TestPreparedModel12 : public V1_2::IPreparedModel {
public:
- TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute_1_2(request, measure, callback);
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
return mLatestPreparedModel->executeSynchronously(request, measure, cb);
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -273,22 +286,22 @@
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Like TestPreparedModelLatest, but implementing 1.0
class TestPreparedModel10 : public V1_0::IPreparedModel {
public:
- TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Behaves like SampleDriver, except that it produces wrapped IPreparedModel.
@@ -300,13 +313,13 @@
// status). Otherwise, don't bother to execute, and just send
// back errorStatus (as the execution status, not the launch
// status).
- TestDriver13(const std::string& name, ErrorStatus errorStatus)
+ TestDriver13(const std::string& name, V1_3::ErrorStatus errorStatus)
: SampleDriver(name.c_str()), mErrorStatus(errorStatus) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance =
@@ -314,41 +327,43 @@
.ifPerformance = kPerf,
.whilePerformance = kPerf};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
+ hardware::Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
model, preference, priority, deadline, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
}
- if (prepareModelReturn != ErrorStatus::NONE) {
+ if (prepareModelReturn != V1_3::ErrorStatus::NONE) {
actualCallback->notify_1_3(
- localCallback->getStatus(),
+ convertToV1_3(localCallback->getStatus()),
V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
return prepareModelReturn;
}
localCallback->wait();
if (localCallback->getStatus() != ErrorStatus::NONE) {
actualCallback->notify_1_3(
- localCallback->getStatus(),
+ convertToV1_3(localCallback->getStatus()),
V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
} else {
actualCallback->notify_1_3(
@@ -358,13 +373,14 @@
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
+ hardware::Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
model, preference, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
@@ -388,11 +404,11 @@
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_0::ErrorStatus> prepareModelReturn =
+ hardware::Return<V1_0::ErrorStatus> prepareModelReturn =
SampleDriver::prepareModel_1_1(model, preference, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
@@ -414,75 +430,79 @@
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
- actualCallback);
+ return prepareModel_1_1(nn::convertToV1_1(model),
+ V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, actualCallback);
}
private:
- ErrorStatus mErrorStatus;
+ V1_3::ErrorStatus mErrorStatus;
};
// Like TestDriver, but implementing 1.2
class TestDriver12 : public V1_2::IDevice {
public:
- TestDriver12(const std::string& name, ErrorStatus errorStatus)
+ TestDriver12(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
@@ -493,29 +513,29 @@
// Like TestDriver, but implementing 1.1
class TestDriver11 : public V1_1::IDevice {
public:
- TestDriver11(const std::string& name, ErrorStatus errorStatus)
+ TestDriver11(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -528,21 +548,21 @@
// Like TestDriver, but implementing 1.0
class TestDriver10 : public V1_0::IDevice {
public:
- TestDriver10(const std::string& name, ErrorStatus errorStatus)
+ TestDriver10(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
@@ -560,7 +580,7 @@
// Otherwise, don't bother to execute, and just send back
// errorStatus (as the execution status, not the launch status).
TestCompilation(const WrapperModel* model, const std::string& deviceName,
- ErrorStatus errorStatus) {
+ V1_3::ErrorStatus errorStatus) {
std::vector<std::shared_ptr<Device>> devices;
auto device = DeviceManager::forTest_makeDriverDevice(
deviceName, new DriverClass(deviceName, errorStatus));
@@ -613,7 +633,7 @@
template <class DriverClass>
class ExecutionTestTemplate
- : public ::testing::TestWithParam<std::tuple<ErrorStatus, Result, bool>> {
+ : public ::testing::TestWithParam<std::tuple<V1_3::ErrorStatus, WrapperResult, bool>> {
public:
ExecutionTestTemplate()
: kName(toString(std::get<0>(GetParam()))),
@@ -648,11 +668,11 @@
// sends back the actual execution status). Otherwise, don't
// bother to execute, and just send back kForceErrorStatus (as the
// execution status, not the launch status).
- const ErrorStatus kForceErrorStatus;
+ const V1_3::ErrorStatus kForceErrorStatus;
- // What result do we expect from the execution? (The Result
+ // What result do we expect from the execution? (The WrapperResult
// equivalent of kForceErrorStatus.)
- const Result kExpectResult;
+ const WrapperResult kExpectResult;
// Whether mCompilation is created via Introspection API or not.
const bool kUseIntrospectionAPI;
@@ -663,8 +683,10 @@
void setInputOutput(WrapperExecution* execution) {
mInputBuffer = kInputBuffer;
mOutputBuffer = kOutputBufferInitial;
- ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)), Result::NO_ERROR);
- ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)), Result::NO_ERROR);
+ ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)),
+ WrapperResult::NO_ERROR);
+ ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)),
+ WrapperResult::NO_ERROR);
}
const float kInputBuffer = 3.14;
@@ -683,7 +705,7 @@
uint32_t output = model.addOperand(&tensorType);
model.addOperation(ANEURALNETWORKS_FLOOR, {input}, {output});
model.identifyInputsAndOutputs({input}, {output});
- assert(model.finish() == Result::NO_ERROR);
+ assert(model.finish() == WrapperResult::NO_ERROR);
return model;
}
@@ -697,13 +719,13 @@
GTEST_SKIP();
}
- ASSERT_EQ(mCompilation.finish(), Result::NO_ERROR);
+ ASSERT_EQ(mCompilation.finish(), WrapperResult::NO_ERROR);
const auto getDimensionsWhileRunning = [](WrapperExecution& execution) {
TestPreparedModelLatest::waitForExecutionToBegin();
// Cannot query dimensions while execution is running
std::vector<uint32_t> dimensions;
- EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), WrapperResult::BAD_STATE);
};
{
@@ -712,21 +734,22 @@
ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution));
TestPreparedModelLatest::pauseExecutions(true);
WrapperEvent event;
- ASSERT_EQ(execution.startCompute(&event), Result::NO_ERROR);
+ ASSERT_EQ(execution.startCompute(&event), WrapperResult::NO_ERROR);
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
ASSERT_EQ(event.wait(), kExpectResult);
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
{
@@ -738,17 +761,18 @@
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
{
@@ -767,20 +791,21 @@
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
- if (kExpectResult != Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult != WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// computeWithDependencies doesn't support OUTPUT_INSUFFICIENT_SIZE
SCOPED_TRACE("computeWithDependencies");
WrapperExecution execution(&mCompilation);
@@ -796,32 +821,35 @@
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(event.wait(), kExpectResult);
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
} else {
- ASSERT_EQ(event.wait(), Result::UNEXPECTED_NULL);
+ ASSERT_EQ(event.wait(), WrapperResult::UNEXPECTED_NULL);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
}
auto kTestValues = ::testing::Values(
- std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE,
+ std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED,
+ std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE,
+ std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
+ std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+ WrapperResult::OUTPUT_INSUFFICIENT_SIZE,
+ /* kUseIntrospectionAPI */ false),
+ std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA,
/* kUseIntrospectionAPI */ false));
class ExecutionTest13 : public ExecutionTestTemplate<TestDriver13> {};
@@ -838,27 +866,29 @@
class ExecutionTest11 : public ExecutionTestTemplate<TestDriver11> {};
TEST_P(ExecutionTest11, Wait) {
- if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
+ if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
TestWait();
}
INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest11, kTestValues);
class ExecutionTest10 : public ExecutionTestTemplate<TestDriver10> {};
TEST_P(ExecutionTest10, Wait) {
- if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
+ if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
TestWait();
}
INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest10, kTestValues);
auto kIntrospectionTestValues = ::testing::Values(
- std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE,
+ std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED,
+ std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE,
+ std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
+ std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+ WrapperResult::OUTPUT_INSUFFICIENT_SIZE,
+ /* kUseIntrospectionAPI */ true),
+ std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA,
/* kUseIntrospectionAPI */ true));
INSTANTIATE_TEST_SUITE_P(IntrospectionFlavor, ExecutionTest13, kIntrospectionTestValues);
diff --git a/runtime/test/TestExtensions.cpp b/runtime/test/TestExtensions.cpp
index f104854..da13073 100644
--- a/runtime/test/TestExtensions.cpp
+++ b/runtime/test/TestExtensions.cpp
@@ -32,7 +32,9 @@
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
using TypeManager = ::android::nn::TypeManager;
-using namespace android::nn::hal;
+namespace hardware = ::android::hardware;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
const char* kTestDriverName = "extensions-test-driver";
const char* kTestExtension1 = "vendor.test.one";
@@ -44,23 +46,24 @@
TestDriver() : SampleDriver(kTestDriverName) {}
~TestDriver() override {}
- Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
cb(V1_0::ErrorStatus::NONE, {
{.name = kTestExtension1},
{.name = kTestExtension2},
{.name = kTestExtension3},
});
- return Void();
+ return hardware::Void();
}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_1_3_cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model&,
+ getSupportedOperations_1_3_cb) override {
CHECK(false) << "not implemented";
- return Void();
+ return hardware::Void();
}
};
diff --git a/runtime/test/TestFailingDriver.cpp b/runtime/test/TestFailingDriver.cpp
index 7d41ace..d2e30a6 100644
--- a/runtime/test/TestFailingDriver.cpp
+++ b/runtime/test/TestFailingDriver.cpp
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include <algorithm>
#include <memory>
#include <vector>
@@ -28,7 +29,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using sample_driver::SampleDriverPartial;
using Result = test_wrapper::Result;
using WrapperOperandType = test_wrapper::OperandType;
@@ -50,20 +50,21 @@
// EmptyOperationResolver causes execution to fail.
FailingTestDriver() : SampleDriverPartial(kTestDriverName, &mEmptyOperationResolver) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE,
- {.operandPerformance = {{.type = OperandType::TENSOR_FLOAT32,
+ {.operandPerformance = {{.type = V1_3::OperandType::TENSOR_FLOAT32,
.info = {.execTime = 0.1, // Faster than CPU.
.powerUsage = 0.1}}}});
- return Void();
+ return hardware::Void();
}
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override {
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override {
std::vector<bool> supported(model.main.operations.size());
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [](const Operation& operation) { return operation.type == OperationType::SQRT; });
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [](const V1_3::Operation& operation) {
+ return operation.type == V1_3::OperationType::SQRT;
+ });
return supported;
}
diff --git a/runtime/test/TestIntrospectionControl.cpp b/runtime/test/TestIntrospectionControl.cpp
index 972619e..abb7e33 100644
--- a/runtime/test/TestIntrospectionControl.cpp
+++ b/runtime/test/TestIntrospectionControl.cpp
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include <algorithm>
#include <chrono>
#include <iterator>
#include <map>
@@ -41,7 +42,10 @@
namespace {
using namespace ::android;
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
@@ -63,40 +67,42 @@
template <typename T>
using MQDescriptorSync = hardware::MQDescriptorSync<T>;
-constexpr Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-constexpr Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456};
-constexpr Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56};
+constexpr V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+constexpr V1_2::Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456};
+constexpr V1_2::Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56};
// This is an IDevice for testing purposes. The test driver has customized
// getCapabilities_1_3 and getSupportedOperations_1_3.
class TestDriver : public SampleDriver {
public:
- TestDriver(const char* name, Capabilities capabilities, const std::vector<bool>& supportedOps)
+ TestDriver(const char* name, V1_3::Capabilities capabilities,
+ const std::vector<bool>& supportedOps)
: SampleDriver(name), mCapabilities(capabilities), mSupportedOps(supportedOps) {}
~TestDriver() override {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, mCapabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
- return Void();
+ return hardware::Void();
}
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [this](Operation op) { return mSupportedOps[static_cast<int32_t>(op.type)]; });
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [this](V1_3::Operation op) {
+ return mSupportedOps[static_cast<int32_t>(op.type)];
+ });
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
private:
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
std::vector<bool> mSupportedOps;
};
@@ -119,7 +125,7 @@
struct DeviceSpecification {
DeviceSpecification(const std::string& name, float perf, std::vector<bool>& supportedOps)
: mName(name), mSupportedOps(supportedOps) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
mCapabilities = {
.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
@@ -129,7 +135,7 @@
.whilePerformance = perfInfo};
}
std::string mName;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
std::vector<bool> mSupportedOps;
};
@@ -383,14 +389,14 @@
// Returns (unfenced timing, fenced timing).
// Not for PASS_CPU.
-std::pair<Timing, Timing> getExpectedTiming(Success s, bool fencedExecution) {
+std::pair<V1_2::Timing, V1_2::Timing> getExpectedTiming(Success s, bool fencedExecution) {
CHECK_NE(s, Success::PASS_CPU);
if (!hasBit(s, Success::PASS_BIT)) {
return {kBadTiming, kBadTiming};
}
- std::pair<Timing, Timing> result;
+ std::pair<V1_2::Timing, V1_2::Timing> result;
result.first.timeOnDevice = hasBit(s, Success::PASS_UNFENCED_DEVICE_BIT)
? kGoodUnfencedTiming.timeOnDevice
: UINT64_MAX;
@@ -416,12 +422,12 @@
class TestPreparedModelLatest : public SamplePreparedModel {
public:
TestPreparedModelLatest(const HidlModel& model, const SampleDriver* driver, Success success)
- : SamplePreparedModel(model, driver, ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{},
- kDefaultPriority),
+ : SamplePreparedModel(model, driver, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{},
+ nn::kDefaultPriority13),
mSuccess(success) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request&, const sp<V1_0::IExecutionCallback>& callback) override {
switch (mSuccess) {
case Success::PASS_NEITHER:
std::thread([callback] {
@@ -445,9 +451,10 @@
}
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request&, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
switch (mSuccess) {
case Success::PASS_NEITHER:
case Success::PASS_DEVICE:
@@ -475,17 +482,18 @@
}
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const sp<V1_3::IExecutionCallback>& callback) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request&, V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const sp<V1_3::IExecutionCallback>& callback) override {
// Use a placeholder V1_0::Request because execute_1_2 ignores request entirely.
const V1_0::ErrorStatus status = execute_1_2(V1_0::Request{}, measure, callback);
return convertToV1_3(status);
}
- Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming measure,
- executeSynchronously_cb cb) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
switch (mSuccess) {
case Success::PASS_NEITHER:
case Success::PASS_DEVICE:
@@ -493,7 +501,7 @@
case Success::PASS_BOTH:
dummyExecution();
cb(V1_0::ErrorStatus::NONE, {}, getExpectedTiming(mSuccess, false).first);
- return Void();
+ return hardware::Void();
case Success::FAIL_WAIT:
// While this is a synchronous execution method, the NNAPI
// runtime may call it even for asynchronous execution, so we
@@ -503,19 +511,22 @@
case Success::FAIL_LAUNCH:
dummyExecution();
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
default:
ADD_FAILURE() << "Unexpected Success kind";
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeSynchronously_1_3_cb cb) override {
const auto wrappedCb = [&cb](V1_0::ErrorStatus status,
- const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
cb(convertToV1_3(status), outputShapes, timing);
};
// Use a placeholder V1_0::Request because executeSynchronously ignores request entirely.
@@ -525,7 +536,7 @@
// ExecutionBurstServer::create has an overload that will use
// IPreparedModel::executeSynchronously(), so we can rely on that, rather
// than having to implement ExecutionBurstServer::IExecutorWithCache.
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -534,21 +545,26 @@
callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
cb(burst == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, burst);
- return Void();
+ return hardware::Void();
}
- Return<void> executeFenced(const Request&, const hidl_vec<hidl_handle>&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const OptionalTimeoutDuration&, executeFenced_cb callback) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<void> executeFenced(const V1_3::Request&,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeFenced_cb callback) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
if (hasBit(mSuccess, Success::PASS_BIT)) {
dummyExecution();
const auto expectedTiming = getExpectedTiming(mSuccess, true);
sp<SampleFencedExecutionCallback> fencedExecutionCallback =
new SampleFencedExecutionCallback(expectedTiming.first, expectedTiming.second,
V1_3::ErrorStatus::NONE);
- callback(V1_3::ErrorStatus::NONE, hidl_handle(nullptr), fencedExecutionCallback);
- return Void();
+ callback(V1_3::ErrorStatus::NONE, hardware::hidl_handle(nullptr),
+ fencedExecutionCallback);
+ return hardware::Void();
}
switch (mSuccess) {
case Success::FAIL_WAIT:
@@ -559,11 +575,12 @@
FALLTHROUGH_INTENDED;
case Success::FAIL_LAUNCH:
dummyExecution();
- callback(V1_3::ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
- return Void();
+ callback(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr),
+ nullptr);
+ return hardware::Void();
default:
ADD_FAILURE() << "Unexpected Success kind";
- return Void();
+ return hardware::Void();
}
}
@@ -607,22 +624,24 @@
TestPreparedModel12(const HidlModel& model, const SampleDriver* driver, Success success)
: mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute_1_2(request, measure, callback);
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
return mLatestPreparedModel->executeSynchronously(request, measure, cb);
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -632,7 +651,7 @@
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Like TestPreparedModelLatest, but implementing 1.0
@@ -641,13 +660,13 @@
TestPreparedModel10(const HidlModel& model, const SampleDriver* driver, Success success)
: mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Behaves like SampleDriver, except that it produces customized IPrepareModel.
@@ -656,31 +675,31 @@
TestDriver13(const std::string& name, Success success)
: SampleDriver(name.c_str()), mSuccess(success) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance =
nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.operations.size(), true);
cb(V1_0::ErrorStatus::NONE, supported);
@@ -688,39 +707,41 @@
std::vector<bool> supported;
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, supported);
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference, Priority, const OptionalTimePoint&,
- const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference, V1_3::Priority,
+ const V1_3::OptionalTimePoint&, const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) override {
callback->notify_1_3(V1_3::ErrorStatus::NONE,
new TestPreparedModel13(model, this, mSuccess));
return V1_3::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) override {
callback->notify_1_2(V1_0::ErrorStatus::NONE,
new TestPreparedModel12(nn::convertToV1_3(model), this, mSuccess));
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference,
const sp<V1_0::IPreparedModelCallback>& callback) override {
callback->notify(V1_0::ErrorStatus::NONE,
new TestPreparedModel10(nn::convertToV1_3(model), this, mSuccess));
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override {
- return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
- callback);
+ return prepareModel_1_1(nn::convertToV1_1(model),
+ V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, callback);
}
private:
@@ -732,27 +753,27 @@
public:
TestDriver11(const std::string& name, Success success)
: mLatestDriver(new TestDriver13(name, success)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
diff --git a/runtime/test/TestMemoryDomain.cpp b/runtime/test/TestMemoryDomain.cpp
index 06418e5..35a826a 100644
--- a/runtime/test/TestMemoryDomain.cpp
+++ b/runtime/test/TestMemoryDomain.cpp
@@ -34,20 +34,22 @@
#include "TestUtils.h"
using namespace android::nn;
-using namespace hal;
-using Result = test_wrapper::Result;
+namespace hardware = android::hardware;
+using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
+using android::sp;
namespace {
// A buffer for test that does nothing.
-class TestBuffer : public IBuffer {
+class TestBuffer : public V1_3::IBuffer {
public:
- Return<ErrorStatus> copyTo(const hidl_memory&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
+ hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory&) override {
+ return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<ErrorStatus> copyFrom(const hidl_memory&, const hidl_vec<uint32_t>&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
+ hardware::Return<V1_3::ErrorStatus> copyFrom(const hardware::hidl_memory&,
+ const hardware::hidl_vec<uint32_t>&) override {
+ return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
};
@@ -73,64 +75,67 @@
class TestDriverLatest : public sample_driver::SampleDriver {
public:
- TestDriverLatest(const char* name, std::set<OperationType> supportedOperations,
+ TestDriverLatest(const char* name, std::set<V1_3::OperationType> supportedOperations,
AllocateReturn allocateReturn)
: SampleDriver(name),
kSupportedOperations(std::move(supportedOperations)),
kAllocateReturn(allocateReturn) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
// Faster than cpu.
- const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
- const Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ const V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
// The tests will never use a referenced model.
CHECK(model.referenced.size() == 0);
std::vector<bool> supported(model.main.operations.size(), false);
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [this](const Operation& op) { return kSupportedOperations.count(op.type) > 0; });
- cb(ErrorStatus::NONE, supported);
- return Void();
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [this](const V1_3::Operation& op) {
+ return kSupportedOperations.count(op.type) > 0;
+ });
+ cb(V1_3::ErrorStatus::NONE, supported);
+ return hardware::Void();
}
- Return<void> allocate(const BufferDesc&, const hidl_vec<sp<IPreparedModel>>&,
- const hidl_vec<BufferRole>&, const hidl_vec<BufferRole>&,
- allocate_cb cb) override {
+ hardware::Return<void> allocate(const V1_3::BufferDesc&,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>&,
+ const hardware::hidl_vec<V1_3::BufferRole>&,
+ const hardware::hidl_vec<V1_3::BufferRole>&,
+ allocate_cb cb) override {
switch (kAllocateReturn) {
case AllocateReturn::OK:
- cb(ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::BAD_IBUFFER:
- cb(ErrorStatus::NONE, nullptr, mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, nullptr, mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::BAD_TOKEN:
- cb(ErrorStatus::NONE, new TestBuffer(), 0);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, new TestBuffer(), 0);
+ return hardware::Void();
case AllocateReturn::BAD_STATUS:
- cb(ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::NOT_SUPPORTED:
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ return hardware::Void();
}
LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn);
- return Void();
+ return hardware::Void();
}
private:
- const std::set<OperationType> kSupportedOperations;
+ const std::set<V1_3::OperationType> kSupportedOperations;
const AllocateReturn kAllocateReturn;
uint32_t mValidBufferToken = 1;
};
@@ -160,7 +165,7 @@
model->addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp});
model->addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1});
model->identifyInputsAndOutputs({input0, input1, input2}, {output0, output1});
- EXPECT_EQ(model->finish(), Result::NO_ERROR);
+ EXPECT_EQ(model->finish(), WrapperResult::NO_ERROR);
}
class MemoryDomainTestBase : public ::testing::Test {
@@ -199,14 +204,14 @@
std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size());
std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(),
[&deviceMap](const std::string& name) { return deviceMap.at(name); });
- Result result;
+ WrapperResult result;
std::tie(result, compilation) =
test_wrapper::Compilation::createForDevices(&mModel, devices);
- EXPECT_EQ(result, Result::NO_ERROR);
+ EXPECT_EQ(result, WrapperResult::NO_ERROR);
} else {
compilation = test_wrapper::Compilation(&mModel);
}
- EXPECT_EQ(compilation.finish(), Result::NO_ERROR);
+ EXPECT_EQ(compilation.finish(), WrapperResult::NO_ERROR);
return compilation;
}
@@ -245,7 +250,8 @@
public ::testing::WithParamInterface<MemoryDomainTestParam> {
protected:
// If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED.
- void createAndRegisterDriver(const char* name, std::set<OperationType> supportedOperations,
+ void createAndRegisterDriver(const char* name,
+ std::set<V1_3::OperationType> supportedOperations,
AllocateReturn allocateReturn) {
sp<V1_0::IDevice> driver;
if (kUseV1_2Driver) {
@@ -275,9 +281,10 @@
// Test device memory allocation on a compilation with only a single partition.
TEST_P(MemoryDomainTest, SinglePartition) {
- createAndRegisterDriver("test_driver",
- {OperationType::ADD, OperationType::SUB, OperationType::MUL},
- kAllocateReturn);
+ createAndRegisterDriver(
+ "test_driver",
+ {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
+ kAllocateReturn);
auto compilation = createCompilation({"test_driver"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -285,7 +292,7 @@
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
@@ -295,7 +302,7 @@
} else {
// The memory should fallback to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -311,9 +318,9 @@
// Test device memory allocation on a compilation with multiple partitions.
TEST_P(MemoryDomainTest, MultiplePartitions) {
- createAndRegisterDriver("test_driver_add", {OperationType::ADD}, kAllocateReturn);
- createAndRegisterDriver("test_driver_sub", {OperationType::SUB}, kAllocateReturn);
- createAndRegisterDriver("test_driver_mul", {OperationType::MUL}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_add", {V1_3::OperationType::ADD}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_sub", {V1_3::OperationType::SUB}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_mul", {V1_3::OperationType::MUL}, kAllocateReturn);
auto compilation = createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -323,7 +330,7 @@
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
@@ -333,7 +340,7 @@
} else {
// The memory should fallback to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -357,7 +364,7 @@
} else {
// The memory should fallback to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -380,7 +387,7 @@
} else {
// The memory should fallback to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -396,9 +403,10 @@
// Test device memory allocation with dynamic shape.
TEST_P(MemoryDomainTest, DynamicShape) {
- createAndRegisterDriver("test_driver",
- {OperationType::ADD, OperationType::SUB, OperationType::MUL},
- kAllocateReturn);
+ createAndRegisterDriver(
+ "test_driver",
+ {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
+ kAllocateReturn);
auto compilation = createCompilation({"test_driver"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -406,7 +414,7 @@
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
diff --git a/runtime/test/TestPartitioning.cpp b/runtime/test/TestPartitioning.cpp
index d85717c..939612a 100644
--- a/runtime/test/TestPartitioning.cpp
+++ b/runtime/test/TestPartitioning.cpp
@@ -145,7 +145,11 @@
namespace {
-using namespace android::nn::hal;
+namespace hardware = android::hardware;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = ::android::nn::CompilationBuilder;
using Deadline = ::android::nn::Deadline;
using Device = ::android::nn::Device;
@@ -154,10 +158,13 @@
using ExecutePriority = ::android::nn::test_wrapper::ExecutePriority;
using ExecutionPlan = ::android::nn::ExecutionPlan;
using ExecutionStep = ::android::nn::ExecutionStep;
+using HalCacheToken = ::android::nn::HalCacheToken;
using HalVersion = ::android::nn::HalVersion;
using HidlModel = V1_3::Model;
using LogicalStep = ::android::nn::LogicalStep;
using ModelBuilder = ::android::nn::ModelBuilder;
+using Operand = ::android::nn::Operand;
+using Operation = ::android::nn::Operation;
using Result = ::android::nn::test_wrapper::Result;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
using WrapperCompilation = ::android::nn::test_wrapper::Compilation;
@@ -166,9 +173,10 @@
using WrapperOperandType = ::android::nn::test_wrapper::OperandType;
using WrapperSymmPerChannelQuantParams = ::android::nn::test_wrapper::SymmPerChannelQuantParams;
using WrapperType = ::android::nn::test_wrapper::Type;
+using android::sp;
-Capabilities makeCapabilities(float perf) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+V1_3::Capabilities makeCapabilities(float perf) {
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
.operandPerformance =
@@ -177,12 +185,12 @@
.whilePerformance = perfInfo};
};
-void update(Capabilities* capabilities, OperandType type, float perf) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+void update(V1_3::Capabilities* capabilities, V1_3::OperandType type, float perf) {
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
::android::nn::update(&capabilities->operandPerformance, type, perfInfo);
}
-float lookupExecTime(const Capabilities& capabilities, OperandType type) {
+float lookupExecTime(const V1_3::Capabilities& capabilities, V1_3::OperandType type) {
return ::android::nn::lookup(capabilities.operandPerformance, type).execTime;
}
@@ -214,16 +222,16 @@
const uint32_t kFirstEncodingV1_3 = kFirstEncodingHARD_SWISH;
const uint32_t kLastEncodingV1_3 = kFirstEncodingHARD_SWISH;
-const std::map<OperationType, uint32_t> operationToFirstEncoding = {
- {OperationType::ADD, kFirstEncodingADD},
- {OperationType::MUL, kFirstEncodingMUL},
- {OperationType::DIV, kFirstEncodingDIV},
- {OperationType::SUB, kFirstEncodingSUB},
- {OperationType::MAXIMUM, kFirstEncodingMAXIMUM},
- {OperationType::MINIMUM, kFirstEncodingMINIMUM},
- {OperationType::POW, kFirstEncodingPOW},
- {OperationType::PRELU, kFirstEncodingPRELU},
- {OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH},
+const std::map<V1_3::OperationType, uint32_t> operationToFirstEncoding = {
+ {V1_3::OperationType::ADD, kFirstEncodingADD},
+ {V1_3::OperationType::MUL, kFirstEncodingMUL},
+ {V1_3::OperationType::DIV, kFirstEncodingDIV},
+ {V1_3::OperationType::SUB, kFirstEncodingSUB},
+ {V1_3::OperationType::MAXIMUM, kFirstEncodingMAXIMUM},
+ {V1_3::OperationType::MINIMUM, kFirstEncodingMINIMUM},
+ {V1_3::OperationType::POW, kFirstEncodingPOW},
+ {V1_3::OperationType::PRELU, kFirstEncodingPRELU},
+ {V1_3::OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH},
};
// Sorted in reverse order (std::greater) so that we can use map::lower_bound to
@@ -244,20 +252,20 @@
// Look up the operation with the specified index in a graph, and return the
// operation encoding; or, if for some reason this is not one of the encoded
// operations, then return kBadOperation.
-uint32_t lookupOperation(std::function<const Operation&(uint32_t)> getOperation,
- std::function<const Operand&(uint32_t)> getOperand,
+uint32_t lookupOperation(std::function<const V1_3::Operation&(uint32_t)> getOperation,
+ std::function<const V1_3::Operand&(uint32_t)> getOperand,
std::function<const uint8_t*(uint32_t)> getValue,
uint32_t operationIndex) {
- const Operation& operation = getOperation(operationIndex);
+ const V1_3::Operation& operation = getOperation(operationIndex);
switch (operation.type) {
- case OperationType::ADD:
- case OperationType::MUL:
- case OperationType::DIV:
- case OperationType::SUB: {
+ case V1_3::OperationType::ADD:
+ case V1_3::OperationType::MUL:
+ case V1_3::OperationType::DIV:
+ case V1_3::OperationType::SUB: {
// input2 is the fused activation function
- const Operand& input2 = getOperand(operation.inputs[2]);
- if ((input2.type == OperandType::INT32) &&
- (input2.lifetime == OperandLifeTime::CONSTANT_COPY)) {
+ const V1_3::Operand& input2 = getOperand(operation.inputs[2]);
+ if ((input2.type == V1_3::OperandType::INT32) &&
+ (input2.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY)) {
int32_t value;
CHECK_EQ(sizeof(value), input2.location.length);
memcpy(&value, getValue(input2.location.offset), input2.location.length);
@@ -276,11 +284,15 @@
return kBadOperation;
}
-uint32_t lookupOperation(const HidlModel& model, const Subgraph& subgraph,
+uint32_t lookupOperation(const HidlModel& model, const V1_3::Subgraph& subgraph,
uint32_t operationIndex) {
return lookupOperation(
- [&subgraph](uint32_t index) -> const Operation& { return subgraph.operations[index]; },
- [&subgraph](uint32_t index) -> const Operand& { return subgraph.operands[index]; },
+ [&subgraph](uint32_t index) -> const V1_3::Operation& {
+ return subgraph.operations[index];
+ },
+ [&subgraph](uint32_t index) -> const V1_3::Operand& {
+ return subgraph.operands[index];
+ },
[&model](uint32_t offset) { return &model.operandValues[offset]; }, operationIndex);
}
@@ -288,12 +300,11 @@
// This is a debugging utility function
void dump(const char* name, const ModelBuilder* model) {
const HidlModel hidlModel = model->makeHidlModel();
- std::cout << name << ": " << toString(hidlModel) << std::endl;
- std::cout << "inputs: " << toString(hidlModel.main.inputIndexes) << std::endl;
- std::cout << "outputs: " << toString(hidlModel.main.outputIndexes) << std::endl;
+ std::cout << name << ": " << hidlModel << std::endl;
+ std::cout << "inputs: " << hidlModel.main.inputIndexes << std::endl;
+ std::cout << "outputs: " << hidlModel.main.outputIndexes << std::endl;
for (size_t i = 0, e = hidlModel.main.operations.size(); i < e; i++) {
- std::cout << "operation[" << i << "]: " << toString(hidlModel.main.operations[i])
- << std::endl;
+ std::cout << "operation[" << i << "]: " << hidlModel.main.operations[i] << std::endl;
}
}
#endif
@@ -313,37 +324,39 @@
OEMYes, // accepted by getSupportedOperations and prepareModel
};
- PartitioningDriver(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriver(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask, OEM oem = OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: SampleDriver(name),
mVersionString(version),
mCapabilities(capabilities),
mOperationMask(operationMask),
mOEM(oem),
mOperationTypes(std::move(operationTypes)) {
- CHECK_EQ(mOperationTypes.count(OperationType::OEM_OPERATION), size_t(0));
+ CHECK_EQ(mOperationTypes.count(V1_3::OperationType::OEM_OPERATION), size_t(0));
if (operationMask) {
- std::for_each(mOperationTypes.begin(), mOperationTypes.end(), [](OperationType type) {
- CHECK_EQ(operationToFirstEncoding.count(type), size_t(0));
- });
+ std::for_each(mOperationTypes.begin(), mOperationTypes.end(),
+ [](V1_3::OperationType type) {
+ CHECK_EQ(operationToFirstEncoding.count(type), size_t(0));
+ });
}
}
~PartitioningDriver() override {}
- Return<void> getVersionString(getVersionString_cb cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb cb) override {
cb(V1_0::ErrorStatus::NONE, mVersionString);
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& callback) override {
if (mOEM == OEMIndecisive) {
for (const auto& operation : model.main.operations) {
- if (operation.type == OperationType::OEM_OPERATION) {
+ if (operation.type == V1_3::OperationType::OEM_OPERATION) {
callback->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
@@ -354,7 +367,7 @@
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
auto ret = getSupportedOperations_1_3(
model, [&outStatus](V1_3::ErrorStatus inStatus,
- const hidl_vec<bool>& supportedOperations) {
+ const hardware::hidl_vec<bool>& supportedOperations) {
if (inStatus == V1_3::ErrorStatus::NONE) {
if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
[](bool v) { return v; })) {
@@ -371,57 +384,60 @@
}
}
- Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; }
-
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
- cb(V1_3::ErrorStatus::NONE, mCapabilities);
- return Void();
+ hardware::Return<V1_0::DeviceStatus> getStatus() override {
+ return V1_0::DeviceStatus::AVAILABLE;
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ cb(V1_3::ErrorStatus::NONE, mCapabilities);
+ return hardware::Void();
+ }
+
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
- return Void();
+ return hardware::Void();
}
cb(V1_3::ErrorStatus::NONE, getSupportedOperationsForSubgraph(model, model.main));
- return Void();
+ return hardware::Void();
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1);
- return Void();
+ return hardware::Void();
}
private:
- std::vector<bool> getSupportedOperationsForSubgraph(const Model& model,
- const Subgraph& subgraph) {
+ std::vector<bool> getSupportedOperationsForSubgraph(const V1_3::Model& model,
+ const V1_3::Subgraph& subgraph) {
CHECK(&subgraph == &model.main ||
std::find_if(model.referenced.begin(), model.referenced.end(),
- [&subgraph](const Subgraph& refSubgraph) {
+ [&subgraph](const V1_3::Subgraph& refSubgraph) {
return &subgraph == &refSubgraph;
}) != model.referenced.end());
auto supportsEntireSubgraph = [this, &model, &subgraph](uint32_t refSubgraphOperandIndex) {
CHECK_LT(refSubgraphOperandIndex, subgraph.operands.size());
- const Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex];
- CHECK(refSubgraphOperand.lifetime == OperandLifeTime::SUBGRAPH);
+ const V1_3::Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex];
+ CHECK(refSubgraphOperand.lifetime == V1_3::OperandLifeTime::SUBGRAPH);
CHECK_LT(refSubgraphOperand.location.offset, model.referenced.size());
- const Subgraph& refSubgraph = model.referenced[refSubgraphOperand.location.offset];
+ const V1_3::Subgraph& refSubgraph =
+ model.referenced[refSubgraphOperand.location.offset];
std::vector<bool> supported = getSupportedOperationsForSubgraph(model, refSubgraph);
return std::all_of(supported.begin(), supported.end(), [](bool x) { return x; });
};
const size_t count = subgraph.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = subgraph.operations[i];
+ const V1_3::Operation& operation = subgraph.operations[i];
if (mOperationTypes.count(operation.type)) {
- if (operation.type == OperationType::IF) {
+ if (operation.type == V1_3::OperationType::IF) {
namespace op = android::nn::operation_if;
CHECK_GE(operation.inputs.size(), op::kFirstInput);
supported[i] =
supportsEntireSubgraph(operation.inputs[op::kThenModelOperand]) &&
supportsEntireSubgraph(operation.inputs[op::kElseModelOperand]);
- } else if (operation.type == OperationType::WHILE) {
+ } else if (operation.type == V1_3::OperationType::WHILE) {
namespace op = android::nn::operation_while;
CHECK_GE(operation.inputs.size(), op::kFirstInput);
supported[i] =
@@ -432,7 +448,7 @@
}
continue;
}
- if (operation.type == OperationType::OEM_OPERATION) {
+ if (operation.type == V1_3::OperationType::OEM_OPERATION) {
supported[i] = (mOEM != OEMNo);
continue;
}
@@ -447,72 +463,75 @@
}
std::string mVersionString;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
uint32_t mOperationMask;
OEM mOEM;
- std::set<OperationType> mOperationTypes;
+ std::set<V1_3::OperationType> mOperationTypes;
};
// Like PartitioningDriver, but implementing 1.2
class PartitioningDriverV1_2 : public V1_2::IDevice {
public:
- PartitioningDriverV1_2(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_2(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -525,33 +544,33 @@
// Like PartitioningDriver, but implementing 1.1
class PartitioningDriverV1_1 : public V1_1::IDevice {
public:
- PartitioningDriverV1_1(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_1(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -564,25 +583,25 @@
// Like PartitioningDriver, but implementing 1.0
class PartitioningDriverV1_0 : public V1_0::IDevice {
public:
- PartitioningDriverV1_0(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_0(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
@@ -949,7 +968,7 @@
// From a vector of DeviceSpecification, create a vector of
// Devices.
struct DeviceSpecification {
- DeviceSpecification(const std::string& name, const Capabilities& capabilities,
+ DeviceSpecification(const std::string& name, const V1_3::Capabilities& capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
: mName(name),
@@ -959,30 +978,31 @@
mOEM(oem) {}
DeviceSpecification(const std::string& name, float perf, uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, perf, perf, operationMask, oem, operationTypes) {}
DeviceSpecification(const std::string& name, float perf, float perfRelaxed,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, kVersionString, perf, perfRelaxed, operationMask, oem,
operationTypes) {}
DeviceSpecification(const std::string& name, const std::string& version, float perf,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, version, perf, perf, operationMask, oem, operationTypes) {}
DeviceSpecification(const std::string& name, const std::string& version, float perf,
float perfRelaxed, uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mName(name),
mVersionString(version),
mOperationMask(operationMask),
mOEM(oem),
mOperationTypes(std::move(operationTypes)) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
- PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed, .powerUsage = perfRelaxed};
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ V1_0::PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed,
+ .powerUsage = perfRelaxed};
mCapabilities = {
.relaxedFloat32toFloat16PerformanceScalar = perfRelaxedInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfRelaxedInfo,
@@ -1004,11 +1024,11 @@
std::string mName;
std::string mVersionString;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
HalVersion mHalVersion = HalVersion::LATEST;
uint32_t mOperationMask;
PartitioningDriver::OEM mOEM = PartitioningDriver::OEMNo;
- std::set<OperationType> mOperationTypes;
+ std::set<V1_3::OperationType> mOperationTypes;
static constexpr char kVersionString[] = "JUST_AN_EXAMPLE";
@@ -1137,7 +1157,7 @@
// actual definitions
ASSERT_LT(model->operationCount(), kPseudoDefiningOperationBase);
for (uint32_t i = 0, e = model->operationCount(); i < e; i++) {
- const Operation& operation = model->getOperation(i);
+ const V1_3::Operation& operation = android::nn::convertToV1_3(model->getOperation(i));
for (uint32_t output : operation.outputs) {
(*defMap)[output] = i;
}
@@ -1149,12 +1169,12 @@
}
// look for NO_VALUE and CONSTANT_COPY
for (uint32_t i = 0, e = model->operandCount(); i < e; i++) {
- const Operand& operand = model->getOperand(i);
+ const V1_3::Operand& operand = android::nn::convertToV1_3(model->getOperand(i));
switch (operand.lifetime) {
- case OperandLifeTime::NO_VALUE:
+ case V1_3::OperandLifeTime::NO_VALUE:
(*defMap)[i] = kPseudoDefiningOperationNoValue;
break;
- case OperandLifeTime::CONSTANT_COPY: {
+ case V1_3::OperandLifeTime::CONSTANT_COPY: {
ASSERT_EQ(operand.location.length, sizeof(uint32_t));
uint32_t value;
memcpy(&value, model->getPointerToOperandValue(operand.location.offset),
@@ -1163,9 +1183,9 @@
(*defMap)[i] = kPseudoDefiningOperationConstantCopy0 + value;
break;
}
- case OperandLifeTime::TEMPORARY_VARIABLE:
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case V1_3::OperandLifeTime::TEMPORARY_VARIABLE:
+ case V1_3::OperandLifeTime::SUBGRAPH_INPUT:
+ case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT:
// already handled
break;
default:
@@ -1207,7 +1227,6 @@
bool compare(const Operand& operandA, const Operand& operandB) {
if (operandA.type != operandB.type || operandA.dimensions != operandB.dimensions ||
- operandA.numberOfConsumers != operandB.numberOfConsumers ||
operandA.scale != operandB.scale || operandA.zeroPoint != operandB.zeroPoint) {
return false;
}
@@ -2021,8 +2040,8 @@
// WrapperOperandType is the NeuralNetworksWrapper.h representation of a
// full operand type (WrapperType plus dimensions plus other attributes).
- auto TestType = [](OperandType operandType) {
- if (operandType == OperandType::SUBGRAPH) {
+ auto TestType = [](V1_3::OperandType operandType) {
+ if (operandType == V1_3::OperandType::SUBGRAPH) {
// SUBGRAPH capabilities are handled differently.
return;
}
@@ -2037,11 +2056,11 @@
model.finish();
ASSERT_TRUE(model.isValid());
- const Capabilities baseCapabilities = makeCapabilities(0.5);
+ const V1_3::Capabilities baseCapabilities = makeCapabilities(0.5);
{
// better than base
- Capabilities goodCapabilities = baseCapabilities;
+ V1_3::Capabilities goodCapabilities = baseCapabilities;
update(&goodCapabilities, operandType, 0.25);
const auto devices =
@@ -2062,7 +2081,7 @@
{
// worse than base
- Capabilities badCapabilities = baseCapabilities;
+ V1_3::Capabilities badCapabilities = baseCapabilities;
update(&badCapabilities, operandType, 0.75);
const auto devices =
makeDevices({{"base", baseCapabilities, ~0U, PartitioningDriver::OEMYes},
@@ -2081,13 +2100,13 @@
}
};
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- TestType(static_cast<OperandType>(type));
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ TestType(static_cast<V1_3::OperandType>(type));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- TestType(static_cast<OperandType>(type));
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ TestType(static_cast<V1_3::OperandType>(type));
}
}
@@ -2167,8 +2186,9 @@
ASSERT_TRUE(mModel.has_value());
ASSERT_TRUE(!mCompilation.has_value());
- auto devices = makeDevices({{"fill", 0.9, 0U, PartitioningDriver::OEMNo, {OperationType::FILL}},
- {"add", 0.9, 0U, PartitioningDriver::OEMNo, {OperationType::ADD}}});
+ auto devices =
+ makeDevices({{"fill", 0.9, 0U, PartitioningDriver::OEMNo, {V1_3::OperationType::FILL}},
+ {"add", 0.9, 0U, PartitioningDriver::OEMNo, {V1_3::OperationType::ADD}}});
mCompilation = PartitioningCompilation(&mModel.value(), devices);
ASSERT_EQ(mCompilation->setPartitioning(DeviceManager::kPartitioningWithoutFallback),
@@ -2824,44 +2844,44 @@
TEST_F(PerfTest, Lookup) {
// Derive an arbitrary (but reproducible) performance value from an OperandType.
// We'll use this to ensure that we can save and then recover a type's performance.
- auto typePerf = [](OperandType type) { return float(static_cast<uint32_t>(type)); };
+ auto typePerf = [](V1_3::OperandType type) { return float(static_cast<uint32_t>(type)); };
- Capabilities capabilities = makeCapabilities(-1.0f);
+ V1_3::Capabilities capabilities = makeCapabilities(-1.0f);
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
update(&capabilities, operandType, typePerf(operandType));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
update(&capabilities, operandType, typePerf(operandType));
}
// Make sure lookup retrieves the values stored by update
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
- if (operandType == OperandType::SUBGRAPH) {
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
+ if (operandType == V1_3::OperandType::SUBGRAPH) {
// SUBGRAPH capabilities are handled differently.
continue;
}
SCOPED_TRACE(toString(operandType));
EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
SCOPED_TRACE(toString(operandType));
EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType));
}
// Check the behavior of a missing type
- OperandType operandType =
- static_cast<OperandType>(static_cast<uint32_t>(OperandTypeRange::BASE_MAX) + 1);
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(
+ static_cast<uint32_t>(V1_3::OperandTypeRange::BASE_MAX) + 1);
EXPECT_EQ(lookupExecTime(capabilities, operandType), FLT_MAX);
}
@@ -3005,7 +3025,7 @@
// The device supports all operations.
const auto devices =
- makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}});
+ makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {V1_3::OperationType::IF}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3023,7 +3043,7 @@
0.9,
~0U,
PartitioningDriver::OEMNo,
- {OperationType::WHILE, OperationType::EQUAL}}});
+ {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3047,7 +3067,7 @@
// The device supports all operations but the partitioner ignores its IF
// support due to http://b/159076604#comment5.
const auto devices =
- makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}});
+ makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {V1_3::OperationType::IF}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3090,7 +3110,7 @@
0.9,
~0U,
PartitioningDriver::OEMNo,
- {OperationType::WHILE, OperationType::EQUAL}}});
+ {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
diff --git a/runtime/test/TestPartitioningRandom.cpp b/runtime/test/TestPartitioningRandom.cpp
index 51d7910..294d93a 100644
--- a/runtime/test/TestPartitioningRandom.cpp
+++ b/runtime/test/TestPartitioningRandom.cpp
@@ -95,11 +95,15 @@
namespace android {
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
-using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
+using Device = nn::Device;
using ExecutionPlan = nn::ExecutionPlan;
+using HalCacheToken = nn::HalCacheToken;
using HalVersion = nn::HalVersion;
using HidlModel = V1_3::Model;
using ModelBuilder = nn::ModelBuilder;
@@ -335,7 +339,7 @@
public:
RandomPartitioningTest() : mRandNumEng(GetParam() /* seed */), mRandNumUnitDist(0.0, 1.0) {}
- static Signature getSignature(const HidlModel& model, const Operation& operation);
+ static Signature getSignature(const HidlModel& model, const V1_3::Operation& operation);
protected:
static V1_0::IDevice* makeTestDriver(HalVersion version, const char* name,
@@ -500,7 +504,8 @@
return kOperationToVersion.at(type);
}
-Signature RandomPartitioningTest::getSignature(const HidlModel& model, const Operation& operation) {
+Signature RandomPartitioningTest::getSignature(const HidlModel& model,
+ const V1_3::Operation& operation) {
static const auto kOperationToActivation = [] {
std::map<ANeuralNetworksOperationType, int> result;
for (const auto& pattern : kOperationPatterns) {
@@ -516,9 +521,10 @@
return Signature(operationType, -1);
}
- const Operand& operand = model.main.operands[operation.inputs[activationFunctionInputIndex]];
- CHECK(operand.lifetime == OperandLifeTime::CONSTANT_COPY);
- CHECK(operand.type == OperandType::INT32);
+ const V1_3::Operand& operand =
+ model.main.operands[operation.inputs[activationFunctionInputIndex]];
+ CHECK(operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY);
+ CHECK(operand.type == V1_3::OperandType::INT32);
int32_t value;
memcpy(&value, &model.operandValues[operand.location.offset], operand.location.length);
return Signature(operationType, value);
@@ -546,21 +552,21 @@
TestDriver(const char* name, std::set<Signature> signatures)
: SampleDriver(name), mSignatures(std::move(signatures)) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
@@ -572,19 +578,20 @@
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& callback) override {
// NOTE: We verify that all operations in the model are supported.
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
auto ret = getSupportedOperations_1_3(
model, [&outStatus](V1_3::ErrorStatus inStatus,
- const hidl_vec<bool>& supportedOperations) {
+ const hardware::hidl_vec<bool>& supportedOperations) {
if (inStatus == V1_3::ErrorStatus::NONE) {
if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
[](bool v) { return v; })) {
@@ -610,57 +617,60 @@
public:
TestDriverV1_2(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -675,27 +685,27 @@
public:
TestDriverV1_1(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -710,19 +720,19 @@
public:
TestDriverV1_0(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
diff --git a/runtime/test/TestRemoveDefaultArguments.cpp b/runtime/test/TestRemoveDefaultArguments.cpp
index 8726adc..daef6bf 100644
--- a/runtime/test/TestRemoveDefaultArguments.cpp
+++ b/runtime/test/TestRemoveDefaultArguments.cpp
@@ -98,7 +98,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using sample_driver::SampleDriverPartial;
using Result = test_wrapper::Result;
using WrapperOperandType = test_wrapper::OperandType;
@@ -113,18 +112,18 @@
public:
TestDriver() : SampleDriverPartial(kTestDriverName) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
- return Void();
+ return hardware::Void();
}
void setSupportedInputCount(uint32_t count) { mSupportedInputCount = count; }
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override {
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override {
std::vector<bool> supported(model.main.operations.size());
std::transform(model.main.operations.begin(), model.main.operations.end(),
- supported.begin(), [this](const Operation& operation) {
+ supported.begin(), [this](const V1_3::Operation& operation) {
SCOPED_TRACE("operation = " + toString(operation.type));
EXPECT_EQ(operation.inputs.size(), mSupportedInputCount);
return operation.inputs.size() == mSupportedInputCount;
diff --git a/runtime/test/TestUnspecifiedDimensions.cpp b/runtime/test/TestUnspecifiedDimensions.cpp
index c1bad04..5a2287c 100644
--- a/runtime/test/TestUnspecifiedDimensions.cpp
+++ b/runtime/test/TestUnspecifiedDimensions.cpp
@@ -17,7 +17,10 @@
#include "TestNeuralNetworksWrapper.h"
#include <sys/mman.h>
+#include <memory>
+#include <string>
#include <tuple>
+#include <utility>
#include <vector>
#include <android-base/macros.h>
diff --git a/runtime/test/TestVersionedInterfaces.cpp b/runtime/test/TestVersionedInterfaces.cpp
index 6d1306d..b4f32bc 100644
--- a/runtime/test/TestVersionedInterfaces.cpp
+++ b/runtime/test/TestVersionedInterfaces.cpp
@@ -22,6 +22,7 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <hidl/Status.h>
+#include <nnapi/TypeUtils.h>
#include <utils/Errors.h>
#include <limits>
@@ -37,7 +38,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using testing::_;
using testing::Invoke;
using testing::InvokeWithoutArgs;
@@ -45,40 +45,59 @@
using MockDeviceFactory = MockFunction<sp<V1_0::IDevice>(bool blocking)>;
constexpr uint32_t kNoCacheFilesNeeded = 0;
-constexpr uint32_t kMaxNumberOfCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
-constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
- .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming12 = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
+ .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+constexpr Timing kNoTiming = {};
template <typename... Args>
auto makeCallbackReturn(Args&&... args) {
return [argPack = std::make_tuple(std::forward<Args>(args)...)](const auto& cb) {
std::apply(cb, argPack);
- return Void();
+ return hardware::Void();
};
};
-class MockDevice : public IDevice {
+class MockDevice : public V1_3::IDevice {
public:
static sp<MockDevice> create() {
const sp<MockDevice> mockDevice = new MockDevice();
- const auto linkToDeathRet_ret = []() -> Return<bool> { return true; };
- const auto getCapabilities_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_0::Capabilities{});
+ const auto linkToDeathRet_ret = []() -> hardware::Return<bool> { return true; };
+ const auto getCapabilities_ret = makeCallbackReturn(
+ V1_0::ErrorStatus::NONE, V1_0::Capabilities{
+ .float32Performance = kNoPerformanceInfo,
+ .quantized8Performance = kNoPerformanceInfo,
+ });
const auto getCapabilities_1_1_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_1::Capabilities{});
+ makeCallbackReturn(V1_0::ErrorStatus::NONE,
+ V1_1::Capabilities{
+ .float32Performance = kNoPerformanceInfo,
+ .quantized8Performance = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16Performance = kNoPerformanceInfo,
+ });
const auto getVersionString_ret =
makeCallbackReturn(V1_0::ErrorStatus::NONE, "Google-MockV1");
- const auto getType_ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, DeviceType::OTHER);
- const auto getCapabilities_1_2_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::Capabilities{});
+ const auto getType_ret =
+ makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::DeviceType::OTHER);
+ const auto getCapabilities_1_2_ret = makeCallbackReturn(
+ V1_0::ErrorStatus::NONE,
+ V1_2::Capabilities{
+ .relaxedFloat32toFloat16PerformanceScalar = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = kNoPerformanceInfo,
+ });
const auto getSupportedExtensions_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, hidl_vec<Extension>{});
+ makeCallbackReturn(V1_0::ErrorStatus::NONE, hardware::hidl_vec<V1_2::Extension>{});
const auto getNumberOfCacheFilesNeeded_ret = makeCallbackReturn(
V1_0::ErrorStatus::NONE, kMaxNumberOfCacheFiles, kMaxNumberOfCacheFiles);
- const auto getCapabilities_1_3_ret =
- makeCallbackReturn(V1_3::ErrorStatus::NONE, V1_3::Capabilities{});
+ const auto getCapabilities_1_3_ret = makeCallbackReturn(
+ V1_3::ErrorStatus::NONE,
+ V1_3::Capabilities{
+ .relaxedFloat32toFloat16PerformanceScalar = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = kNoPerformanceInfo,
+ .ifPerformance = kNoPerformanceInfo,
+ .whilePerformance = kNoPerformanceInfo,
+ });
ON_CALL(*mockDevice, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret));
ON_CALL(*mockDevice, getCapabilities(_)).WillByDefault(Invoke(getCapabilities_ret));
@@ -108,73 +127,82 @@
}
// IBase methods below.
- Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient,
- uint64_t /*cookie*/) override {
+ hardware::Return<bool> linkToDeath(const sp<hardware::hidl_death_recipient>& recipient,
+ uint64_t /*cookie*/) override {
mDeathRecipient = recipient;
return linkToDeathRet();
}
- MOCK_METHOD(Return<void>, ping, (), (override));
+ MOCK_METHOD(hardware::Return<void>, ping, (), (override));
// V1_0 methods below.
- MOCK_METHOD(Return<void>, getCapabilities, (getCapabilities_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities, (getCapabilities_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations,
(const V1_0::Model& model, getSupportedOperations_cb cb), (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel,
(const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<DeviceStatus>, getStatus, (), (override));
+ MOCK_METHOD(hardware::Return<V1_0::DeviceStatus>, getStatus, (), (override));
// V1_1 methods below.
- MOCK_METHOD(Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_1,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_1,
(const V1_1::Model& model, getSupportedOperations_1_1_cb cb), (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_1,
- (const V1_1::Model& model, ExecutionPreference preference,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel_1_1,
+ (const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& callback),
(override));
// V1_2 methods below.
- MOCK_METHOD(Return<void>, getVersionString, (getVersionString_cb cb), (override));
- MOCK_METHOD(Return<void>, getType, (getType_cb cb), (override));
- MOCK_METHOD(Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_2,
+ MOCK_METHOD(hardware::Return<void>, getVersionString, (getVersionString_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getType, (getType_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_2,
(const V1_2::Model& model, getSupportedOperations_1_2_cb cb), (override));
- MOCK_METHOD(Return<void>, getNumberOfCacheFilesNeeded, (getNumberOfCacheFilesNeeded_cb cb),
+ MOCK_METHOD(hardware::Return<void>, getNumberOfCacheFilesNeeded,
+ (getNumberOfCacheFilesNeeded_cb cb), (override));
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel_1_2,
+ (const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_2,
- (const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
- (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModelFromCache,
- (const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModelFromCache,
+ (const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
(override));
// V1_3 methods below.
- MOCK_METHOD(Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_3,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_3,
(const V1_3::Model& model, getSupportedOperations_1_3_cb cb), (override));
- MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModel_1_3,
- (const V1_3::Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<V1_3::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, prepareModel_1_3,
+ (const V1_3::Model& model, V1_1::ExecutionPreference preference,
+ V1_3::Priority priority, const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3,
- (const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<V1_3::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3,
+ (const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, allocate,
- (const BufferDesc& desc, const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles,
- allocate_cb cb),
+ MOCK_METHOD(hardware::Return<void>, allocate,
+ (const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb),
(override));
// Helper methods.
- MOCK_METHOD(Return<bool>, linkToDeathRet, ());
+ MOCK_METHOD(hardware::Return<bool>, linkToDeathRet, ());
void simulateCrash() {
ASSERT_NE(nullptr, mDeathRecipient.get());
@@ -189,15 +217,15 @@
private:
// Members.
- sp<hidl_death_recipient> mDeathRecipient;
+ sp<hardware::hidl_death_recipient> mDeathRecipient;
};
-class MockPreparedModel : public IPreparedModel {
+class MockPreparedModel : public V1_3::IPreparedModel {
public:
static sp<MockPreparedModel> create() {
const sp<MockPreparedModel> mockPreparedModel = new MockPreparedModel();
- const auto linkToDeathRet_ret = []() -> Return<bool> { return true; };
+ const auto linkToDeathRet_ret = []() -> hardware::Return<bool> { return true; };
ON_CALL(*mockPreparedModel, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret));
// This EXPECT_CALL(...).Times(testing::AnyNumber()) calls are to
@@ -208,27 +236,28 @@
}
// IBase methods below.
- Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient,
- uint64_t /*cookie*/) override {
+ hardware::Return<bool> linkToDeath(const sp<hardware::hidl_death_recipient>& recipient,
+ uint64_t /*cookie*/) override {
mDeathRecipient = recipient;
return linkToDeathRet();
}
- MOCK_METHOD(Return<void>, ping, (), (override));
+ MOCK_METHOD(hardware::Return<void>, ping, (), (override));
// V1_0 methods below.
- MOCK_METHOD(Return<V1_0::ErrorStatus>, execute,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, execute,
(const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback),
(override));
// V1_2 methods below.
- MOCK_METHOD(Return<V1_0::ErrorStatus>, execute_1_2,
- (const V1_0::Request& request, MeasureTiming measure,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, execute_1_2,
+ (const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, executeSynchronously,
- (const V1_0::Request& request, MeasureTiming measure, executeSynchronously_cb cb),
+ MOCK_METHOD(hardware::Return<void>, executeSynchronously,
+ (const V1_0::Request& request, V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb),
(override));
- MOCK_METHOD(Return<void>, configureExecutionBurst,
+ MOCK_METHOD(hardware::Return<void>, configureExecutionBurst,
(const sp<V1_2::IBurstCallback>& callback,
const hardware::MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -236,27 +265,28 @@
(override));
// V1_3 methods below.
- MOCK_METHOD(Return<ErrorStatus>, execute_1_3,
- (const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<IExecutionCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, execute_1_3,
+ (const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, executeSynchronously_1_3,
- (const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+ MOCK_METHOD(hardware::Return<void>, executeSynchronously_1_3,
+ (const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
executeSynchronously_1_3_cb cb),
(override));
- MOCK_METHOD(Return<void>, executeFenced,
- (const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration, executeFenced_cb cb),
+ MOCK_METHOD(hardware::Return<void>, executeFenced,
+ (const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb),
(override));
// Helper methods.
- MOCK_METHOD(Return<bool>, linkToDeathRet, ());
+ MOCK_METHOD(hardware::Return<bool>, linkToDeathRet, ());
void simulateCrash() {
ASSERT_NE(nullptr, mDeathRecipient.get());
@@ -271,27 +301,29 @@
private:
// Members.
- sp<hidl_death_recipient> mDeathRecipient;
+ sp<hardware::hidl_death_recipient> mDeathRecipient;
};
class MockBurstContext : public V1_2::IBurstContext {
public:
// V1_2 methods below.
- MOCK_METHOD(Return<void>, freeMemory, (int32_t slot), (override));
+ MOCK_METHOD(hardware::Return<void>, freeMemory, (int32_t slot), (override));
};
-class MockFencedExecutionCallback : public IFencedExecutionCallback {
+class MockFencedExecutionCallback : public V1_3::IFencedExecutionCallback {
public:
// V1_3 methods below.
- MOCK_METHOD(Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override));
};
-class MockBuffer : public IBuffer {
+class MockBuffer : public V1_3::IBuffer {
public:
// V1_3 methods below.
- MOCK_METHOD(Return<ErrorStatus>, copyTo, (const hidl_memory& dst), (override));
- MOCK_METHOD(Return<ErrorStatus>, copyFrom,
- (const hidl_memory& src, const hidl_vec<uint32_t>& dimensions), (override));
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, copyTo, (const hardware::hidl_memory& dst),
+ (override));
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, copyFrom,
+ (const hardware::hidl_memory& src, const hardware::hidl_vec<uint32_t>& dimensions),
+ (override));
};
enum class Version { V1_0, V1_1, V1_2, V1_3, MOCK };
@@ -315,18 +347,19 @@
auto makePreparedModelReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
- return [launchStatus, returnStatus, preparedModel](
- const V1_0::Model& /*model*/,
- const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ return [launchStatus, returnStatus, preparedModel](const V1_0::Model& /*model*/,
+ const sp<V1_0::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus, preparedModel).isOk();
return launchStatus;
};
}
auto makePreparedModel_1_1Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
- return [launchStatus, returnStatus, preparedModel](
- const V1_1::Model& /*model*/, ExecutionPreference /*preference*/,
- const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ return [launchStatus, returnStatus, preparedModel](const V1_1::Model& /*model*/,
+ V1_1::ExecutionPreference /*preference*/,
+ const sp<V1_0::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -334,9 +367,10 @@
auto makePreparedModel_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
- const V1_2::Model& /*model*/, ExecutionPreference /*preference*/,
+ const V1_2::Model& /*model*/, V1_1::ExecutionPreference /*preference*/,
const auto& /*modelCache*/, const auto& /*dataCache*/, const auto& /*token*/,
- const sp<V1_2::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const sp<V1_2::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify_1_2(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -344,11 +378,12 @@
auto makePreparedModel_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
- const V1_3::Model& /*model*/, ExecutionPreference /*preference*/,
- Priority /*priority*/, const OptionalTimePoint& /*deadline*/,
- const hidl_vec<hidl_handle>& /*modelCache*/,
- const hidl_vec<hidl_handle>& /*dataCache*/, const CacheToken& /*token*/,
- const sp<V1_3::IPreparedModelCallback>& cb) -> Return<V1_3::ErrorStatus> {
+ const V1_3::Model& /*model*/, V1_1::ExecutionPreference /*preference*/,
+ V1_3::Priority /*priority*/, const V1_3::OptionalTimePoint& /*deadline*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*modelCache*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*dataCache*/,
+ const HalCacheToken& /*token*/, const sp<V1_3::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_3::ErrorStatus> {
cb->notify_1_3(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -357,51 +392,53 @@
auto makeExecuteReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus) {
return [launchStatus, returnStatus](
const V1_0::Request& /*request*/,
- const sp<V1_0::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const sp<V1_0::IExecutionCallback>& cb) -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus);
return launchStatus;
};
}
auto makeExecute_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
- const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [launchStatus, returnStatus, outputShapes, timing](
- const V1_0::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const sp<V1_2::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const V1_0::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const sp<V1_2::IExecutionCallback>& cb) -> hardware::Return<V1_0::ErrorStatus> {
cb->notify_1_2(returnStatus, outputShapes, timing);
return launchStatus;
};
}
auto makeExecute_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus,
- const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [launchStatus, returnStatus, outputShapes, timing](
- const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
- const sp<V1_3::IExecutionCallback>& cb) -> Return<V1_3::ErrorStatus> {
+ const V1_3::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const sp<V1_3::IExecutionCallback>& cb) -> hardware::Return<V1_3::ErrorStatus> {
cb->notify_1_3(returnStatus, outputShapes, timing);
return launchStatus;
};
}
auto makeExecuteSynchronouslyReturn(V1_0::ErrorStatus status,
- const std::vector<OutputShape>& outputShapes,
- const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [status, outputShapes, timing](const V1_0::Request& /*request*/,
- MeasureTiming /*measureTiming*/,
+ V1_2::MeasureTiming /*measureTiming*/,
const V1_2::IPreparedModel::executeSynchronously_cb& cb) {
cb(status, outputShapes, timing);
- return Void();
+ return hardware::Void();
};
}
auto makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus status,
- const std::vector<OutputShape>& outputShapes,
- const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [status, outputShapes, timing](
- const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const V1_3::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
const V1_3::IPreparedModel::executeSynchronously_1_3_cb& cb) {
cb(status, outputShapes, timing);
- return Void();
+ return hardware::Void();
};
}
auto makeConfigureExecutionBurst(V1_0::ErrorStatus status,
@@ -412,19 +449,20 @@
const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/,
V1_2::IPreparedModel::configureExecutionBurst_cb cb) {
cb(status, burstContext);
- return Void();
+ return hardware::Void();
};
}
-auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hidl_handle& syncFence,
- const sp<IFencedExecutionCallback>& dispatchCallback) {
+auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hardware::hidl_handle& syncFence,
+ const sp<V1_3::IFencedExecutionCallback>& dispatchCallback) {
return [status, syncFence, dispatchCallback](
- const V1_3::Request& /*request*/, const hidl_vec<hidl_handle>& /*waitFor*/,
- MeasureTiming /*measure*/, const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
- const OptionalTimeoutDuration& /*duration*/,
+ const V1_3::Request& /*request*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*waitFor*/,
+ V1_2::MeasureTiming /*measure*/, const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const V1_3::OptionalTimeoutDuration& /*duration*/,
V1_3::IPreparedModel::executeFenced_cb cb) {
cb(status, syncFence, dispatchCallback);
- return Void();
+ return hardware::Void();
};
}
@@ -516,7 +554,7 @@
const auto device = adaptAs(mockDevice, version);
ON_CALL(*mockDeviceFactory, Call(_)).WillByDefault(testing::Return(device));
EXPECT_CALL(*mockDeviceFactory, Call(/*blocking=*/true)).Times(testing::AtLeast(1));
- const DeviceFactory makeDevice = mockDeviceFactory->AsStdFunction();
+ const HalDeviceFactory makeDevice = mockDeviceFactory->AsStdFunction();
return VersionedIDevice::create("MockDevice", makeDevice);
}
@@ -566,7 +604,7 @@
TEST_F(VersionedIDeviceInitializationTest, creationFailure) {
// setup failure
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(nullptr));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -581,7 +619,7 @@
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -592,10 +630,10 @@
TEST_F(VersionedIDeviceInitializationTest, linkToDeathReturnError) {
// setup failure
- const auto ret = []() -> Return<bool> { return false; };
+ const auto ret = []() -> hardware::Return<bool> { return false; };
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice));
EXPECT_CALL(*kMockDevice, linkToDeathRet()).Times(1).WillOnce(InvokeWithoutArgs(ret));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -666,7 +704,8 @@
TEST_F(VersionedIDeviceInitializationTest, getTypeFailure) {
// setup failure
- const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, DeviceType::OTHER);
+ const auto ret =
+ makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_2::DeviceType::OTHER);
EXPECT_CALL(*kMockDevice, getType(_)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -678,7 +717,8 @@
TEST_F(VersionedIDeviceInitializationTest, getSupportedExtensionsFailure) {
// setup failure
- const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, hidl_vec<Extension>{});
+ const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE,
+ hardware::hidl_vec<V1_2::Extension>{});
EXPECT_CALL(*kMockDevice, getSupportedExtensions(_)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -839,9 +879,11 @@
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_LT(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_LT(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -851,9 +893,11 @@
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_LT(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_LT(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -863,9 +907,11 @@
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_EQ(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_EQ(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -875,9 +921,11 @@
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_EQ(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_EQ(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -1107,16 +1155,16 @@
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1124,16 +1172,16 @@
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1141,16 +1189,16 @@
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1158,16 +1206,16 @@
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_3::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1179,7 +1227,7 @@
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1195,7 +1243,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1211,7 +1259,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1229,7 +1277,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1271,13 +1319,14 @@
// setup call
const sp<MockBuffer> mockBuffer = new MockBuffer();
constexpr uint32_t mockToken = 1;
- const auto ret = [mockBuffer](const BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<BufferRole>& /*inputRoles*/,
- const hidl_vec<BufferRole>& /*outputRoles*/,
- V1_3::IDevice::allocate_cb cb) -> Return<void> {
+ const auto ret = [mockBuffer](
+ const V1_3::BufferDesc& /*desc*/,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ V1_3::IDevice::allocate_cb cb) -> hardware::Return<void> {
cb(V1_3::ErrorStatus::NONE, mockBuffer, mockToken);
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
@@ -1292,7 +1341,7 @@
TEST_F(VersionedIDeviceMockTest, wait) {
// setup call
- const auto ret = []() -> Return<void> { return {}; };
+ const auto ret = []() -> hardware::Return<void> { return {}; };
EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(Invoke(ret));
// run test
@@ -1308,16 +1357,16 @@
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1325,16 +1374,16 @@
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1342,16 +1391,16 @@
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1359,16 +1408,16 @@
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_3::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1380,7 +1429,7 @@
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1396,7 +1445,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1412,7 +1461,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1430,7 +1479,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1446,7 +1495,7 @@
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1462,7 +1511,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1478,7 +1527,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1496,7 +1545,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1512,7 +1561,7 @@
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1528,7 +1577,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1544,7 +1593,7 @@
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1562,7 +1611,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1572,13 +1621,13 @@
TEST_F(VersionedIDeviceV1_3Test, allocateFailure) {
// setup failure
- const auto ret = [](const BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<BufferRole>& /*inputRoles*/,
- const hidl_vec<BufferRole>& /*outputRoles*/,
- V1_3::IDevice::allocate_cb cb) -> Return<void> {
+ const auto ret = [](const V1_3::BufferDesc& /*desc*/,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ V1_3::IDevice::allocate_cb cb) -> hardware::Return<void> {
cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
@@ -1600,11 +1649,11 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1615,11 +1664,11 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1630,11 +1679,11 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1645,11 +1694,11 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1660,7 +1709,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1675,7 +1724,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1690,7 +1739,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1705,7 +1754,7 @@
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1767,7 +1816,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1788,7 +1837,7 @@
.WillOnce(testing::Return(nullptr));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1798,7 +1847,7 @@
TEST_F(VersionedIDeviceMockTest, prepareModelAsyncCrash) {
// setup failure
- const auto ret = [this]() -> Return<V1_3::ErrorStatus> {
+ const auto ret = [this]() -> hardware::Return<V1_3::ErrorStatus> {
kMockDevice->simulateCrash();
return V1_3::ErrorStatus::NONE;
};
@@ -1807,7 +1856,7 @@
.WillOnce(InvokeWithoutArgs(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1842,7 +1891,7 @@
.WillOnce(testing::Return(mockRecoveredDevice));
// setup recovered device calls
- const auto ret = []() -> Return<bool> { return true; };
+ const auto ret = []() -> hardware::Return<bool> { return true; };
EXPECT_CALL(*mockRecoveredDevice, linkToDeathRet()).Times(1).WillOnce(Invoke(ret));
// run test
@@ -1903,7 +1952,7 @@
EXPECT_CALL(*mockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(testing::AnyNumber());
EXPECT_CALL(*mockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)).Times(testing::AnyNumber());
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = device.prepareModel(makeModel, {}, {}, {}, {}, {});
CHECK_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
@@ -1948,7 +1997,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1968,7 +2017,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1980,7 +2029,7 @@
// setup failure
EXPECT_CALL(*kMockPreparedModel, linkToDeathRet())
.Times(1)
- .WillOnce(InvokeWithoutArgs([]() -> Return<bool> { return false; }));
+ .WillOnce(InvokeWithoutArgs([]() -> hardware::Return<bool> { return false; }));
const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE,
kMockPreparedModel);
EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _))
@@ -1988,7 +2037,7 @@
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -2030,8 +2079,8 @@
TEST_F(VersionedIPreparedModelV1_2Test, executeAsync) {
// setup call
- const auto ret =
- makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {},
+ kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2046,8 +2095,8 @@
TEST_F(VersionedIPreparedModelV1_3Test, executeAsync) {
// setup call
- const auto ret =
- makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {},
+ kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2092,7 +2141,7 @@
TEST_F(VersionedIPreparedModelV1_2Test, executePreferSync) {
// setup call
- const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2107,7 +2156,7 @@
TEST_F(VersionedIPreparedModelV1_3Test, executePreferSync) {
// setup call
- const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(ret));
@@ -2156,7 +2205,7 @@
TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) {
// setup call
- const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2173,8 +2222,8 @@
TEST_F(VersionedIPreparedModelV1_3Test, executeFenced) {
// setup call
auto memory = allocateSharedMemory(4);
- hidl_handle fakeSyncFence(memory.handle());
- const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
+ hardware::hidl_handle fakeSyncFence(memory.handle());
+ const sp<V1_3::IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
const auto ret = makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, fakeSyncFence, callback);
EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
@@ -2276,7 +2325,7 @@
TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchFailure) {
// setup failure
const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::GENERAL_FAILURE,
- V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2292,7 +2341,7 @@
TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncLaunchFailure) {
// setup failure
const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE,
- V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ V1_3::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2338,7 +2387,7 @@
TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncReturnFailure) {
// setup failure
const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE,
- V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2354,7 +2403,7 @@
TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncReturnFailure) {
// setup failure
const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE,
- V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2402,7 +2451,7 @@
TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2418,7 +2467,7 @@
TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(ret));
@@ -2470,7 +2519,7 @@
TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2487,8 +2536,8 @@
TEST_F(VersionedIPreparedModelV1_3Test, executeFencedFailure) {
// setup failure
auto memory = allocateSharedMemory(4);
- hidl_handle fakeSyncFence(memory.handle());
- const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
+ hardware::hidl_handle fakeSyncFence(memory.handle());
+ const sp<V1_3::IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
const auto ret =
makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, fakeSyncFence, callback);
EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _))
@@ -2894,7 +2943,7 @@
TEST_F(VersionedIPreparedModelMockTest, executeAsyncReturnCrash) {
// setup failure
- const auto ret = [this]() -> Return<V1_3::ErrorStatus> {
+ const auto ret = [this]() -> hardware::Return<V1_3::ErrorStatus> {
kMockPreparedModel->simulateCrash();
return V1_3::ErrorStatus::NONE;
};
diff --git a/runtime/test/android_fuzzing/Converter.cpp b/runtime/test/android_fuzzing/Converter.cpp
index ca853ae..c2fc354 100644
--- a/runtime/test/android_fuzzing/Converter.cpp
+++ b/runtime/test/android_fuzzing/Converter.cpp
@@ -29,39 +29,38 @@
namespace {
using namespace test_helper;
-using namespace android_nn_fuzz;
constexpr uint32_t kMaxSize = 65536;
-TestOperandType convert(OperandType type) {
+TestOperandType convert(android_nn_fuzz::OperandType type) {
return static_cast<TestOperandType>(type);
}
-TestOperationType convert(OperationType type) {
+TestOperationType convert(android_nn_fuzz::OperationType type) {
return static_cast<TestOperationType>(type);
}
-TestOperandLifeTime convert(OperandLifeTime lifetime) {
+TestOperandLifeTime convert(android_nn_fuzz::OperandLifeTime lifetime) {
return static_cast<TestOperandLifeTime>(lifetime);
}
-std::vector<float> convert(const Scales& scales) {
+std::vector<float> convert(const android_nn_fuzz::Scales& scales) {
const auto& repeatedScale = scales.scale();
return std::vector<float>(repeatedScale.begin(), repeatedScale.end());
}
-TestSymmPerChannelQuantParams convert(const SymmPerChannelQuantParams& params) {
+TestSymmPerChannelQuantParams convert(const android_nn_fuzz::SymmPerChannelQuantParams& params) {
std::vector<float> scales = convert(params.scales());
const uint32_t channelDim = params.channel_dim();
return {.scales = std::move(scales), .channelDim = channelDim};
}
-std::vector<uint32_t> convert(const Dimensions& dimensions) {
+std::vector<uint32_t> convert(const android_nn_fuzz::Dimensions& dimensions) {
const auto& repeatedDimension = dimensions.dimension();
return std::vector<uint32_t>(repeatedDimension.begin(), repeatedDimension.end());
}
-TestBuffer convert(size_t size, const Buffer& buffer) {
+TestBuffer convert(size_t size, const android_nn_fuzz::Buffer& buffer) {
if (size == 0) {
return TestBuffer();
}
@@ -70,7 +69,7 @@
return TestBuffer::createRandom(size % kMaxSize, &generator);
}
-TestOperand convert(const Operand& operand) {
+TestOperand convert(const android_nn_fuzz::Operand& operand) {
const TestOperandType type = convert(operand.type());
std::vector<uint32_t> dimensions = convert(operand.dimensions());
const float scale = operand.scale();
@@ -79,7 +78,7 @@
auto channelQuant = convert(operand.channel_quant());
const bool isIgnored = false;
- const auto halType = static_cast<hal::OperandType>(type);
+ const auto halType = static_cast<V1_3::OperandType>(type);
const bool willOverflow = nonExtensionOperandSizeOfDataOverflowsUInt32(halType, dimensions);
const bool makeEmpty = (lifetime == TestOperandLifeTime::NO_VALUE ||
lifetime == TestOperandLifeTime::TEMPORARY_VARIABLE || willOverflow);
@@ -97,7 +96,7 @@
.data = std::move(data)};
}
-std::vector<TestOperand> convert(const Operands& operands) {
+std::vector<TestOperand> convert(const android_nn_fuzz::Operands& operands) {
std::vector<TestOperand> testOperands;
testOperands.reserve(operands.operand_size());
const auto& repeatedOperand = operands.operand();
@@ -106,19 +105,19 @@
return testOperands;
}
-std::vector<uint32_t> convert(const Indexes& indexes) {
+std::vector<uint32_t> convert(const android_nn_fuzz::Indexes& indexes) {
const auto& repeatedIndex = indexes.index();
return std::vector<uint32_t>(repeatedIndex.begin(), repeatedIndex.end());
}
-TestOperation convert(const Operation& operation) {
+TestOperation convert(const android_nn_fuzz::Operation& operation) {
const TestOperationType type = convert(operation.type());
std::vector<uint32_t> inputs = convert(operation.inputs());
std::vector<uint32_t> outputs = convert(operation.outputs());
return {.type = type, .inputs = std::move(inputs), .outputs = std::move(outputs)};
}
-std::vector<TestOperation> convert(const Operations& operations) {
+std::vector<TestOperation> convert(const android_nn_fuzz::Operations& operations) {
std::vector<TestOperation> testOperations;
testOperations.reserve(operations.operation_size());
const auto& repeatedOperation = operations.operation();
@@ -142,7 +141,7 @@
std::for_each(operations.begin(), operations.end(), addAllConsumers);
}
-TestModel convert(const Model& model) {
+TestModel convert(const android_nn_fuzz::Model& model) {
std::vector<TestOperand> operands = convert(model.operands());
std::vector<TestOperation> operations = convert(model.operations());
std::vector<uint32_t> inputIndexes = convert(model.input_indexes());
@@ -161,7 +160,7 @@
} // anonymous namespace
-TestModel convertToTestModel(const Test& model) {
+TestModel convertToTestModel(const android_nn_fuzz::Test& model) {
return convert(model.model());
}
diff --git a/runtime/test/android_fuzzing/FuzzHarness.cpp b/runtime/test/android_fuzzing/FuzzHarness.cpp
index 3d787d6..76c34a7 100644
--- a/runtime/test/android_fuzzing/FuzzHarness.cpp
+++ b/runtime/test/android_fuzzing/FuzzHarness.cpp
@@ -31,7 +31,7 @@
using ::android::nn::nonExtensionOperandSizeOfDataOverflowsUInt32;
using ::android::nn::fuzz::convertToTestModel;
-using ::android::nn::hal::OperandType;
+using ::android::nn::V1_3::OperandType;
using ::test_helper::TestModel;
using ::test_helper::TestOperand;
diff --git a/runtime/test/android_fuzzing/GenerateCorpus.cpp b/runtime/test/android_fuzzing/GenerateCorpus.cpp
index 2f72b9d..783b660 100644
--- a/runtime/test/android_fuzzing/GenerateCorpus.cpp
+++ b/runtime/test/android_fuzzing/GenerateCorpus.cpp
@@ -41,8 +41,8 @@
return static_cast<OperationType>(type);
}
-OperandLifeTime convert(TestOperandLifeTime lifetime) {
- return static_cast<OperandLifeTime>(lifetime);
+Operand::LifeTime convert(TestOperandLifeTime lifetime) {
+ return static_cast<Operand::LifeTime>(lifetime);
}
Scales convert(const std::vector<float>& scales) {
diff --git a/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/runtime/test/fibonacci_extension/FibonacciDriver.cpp
index c488298..66023c1 100644
--- a/runtime/test/fibonacci_extension/FibonacciDriver.cpp
+++ b/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -20,6 +20,7 @@
#include <vector>
+#include <nnapi/Types.h>
#include "FibonacciExtension.h"
#include "HalInterfaces.h"
#include "NeuralNetworksExtensions.h"
@@ -33,10 +34,7 @@
namespace sample_driver {
namespace {
-using namespace hal;
-
-const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
-const uint32_t kTypeWithinExtensionMask = (1 << kLowBitsType) - 1;
+const uint32_t kTypeWithinExtensionMask = (1 << kExtensionTypeBits) - 1;
namespace fibonacci_op {
@@ -48,22 +46,22 @@
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-bool getFibonacciExtensionPrefix(const Model& model, uint16_t* prefix) {
+bool getFibonacciExtensionPrefix(const V1_3::Model& model, uint16_t* prefix) {
NN_RET_CHECK_EQ(model.extensionNameToPrefix.size(), 1u); // Assumes no other extensions in use.
NN_RET_CHECK_EQ(model.extensionNameToPrefix[0].name, EXAMPLE_FIBONACCI_EXTENSION_NAME);
*prefix = model.extensionNameToPrefix[0].prefix;
return true;
}
-bool isFibonacciOperation(const Operation& operation, const Model& model) {
+bool isFibonacciOperation(const V1_3::Operation& operation, const V1_3::Model& model) {
int32_t operationType = static_cast<int32_t>(operation.type);
uint16_t prefix;
NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
- NN_RET_CHECK_EQ(operationType, (prefix << kLowBitsType) | EXAMPLE_FIBONACCI);
+ NN_RET_CHECK_EQ(operationType, (prefix << kExtensionTypeBits) | EXAMPLE_FIBONACCI);
return true;
}
-bool validate(const Operation& operation, const Model& model) {
+bool validate(const V1_3::Operation& operation, const V1_3::Model& model) {
NN_RET_CHECK(isFibonacciOperation(operation, model));
NN_RET_CHECK_EQ(operation.inputs.size(), kNumInputs);
NN_RET_CHECK_EQ(operation.outputs.size(), kNumOutputs);
@@ -71,9 +69,9 @@
int32_t outputType = static_cast<int32_t>(model.main.operands[operation.outputs[0]].type);
uint16_t prefix;
NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
- NN_RET_CHECK(inputType == ((prefix << kLowBitsType) | EXAMPLE_INT64) ||
+ NN_RET_CHECK(inputType == ((prefix << kExtensionTypeBits) | EXAMPLE_INT64) ||
inputType == ANEURALNETWORKS_TENSOR_FLOAT32);
- NN_RET_CHECK(outputType == ((prefix << kLowBitsType) | EXAMPLE_TENSOR_QUANT64_ASYMM) ||
+ NN_RET_CHECK(outputType == ((prefix << kExtensionTypeBits) | EXAMPLE_TENSOR_QUANT64_ASYMM) ||
outputType == ANEURALNETWORKS_TENSOR_FLOAT32);
return true;
}
@@ -128,7 +126,7 @@
uint64_t* output = context->getOutputBuffer<uint64_t>(kOutputTensor);
Shape outputShape = context->getOutputShape(kOutputTensor);
auto outputQuant = reinterpret_cast<const ExampleQuant64AsymmParams*>(
- outputShape.extraParams.extension().data());
+ std::get<Operand::ExtensionParams>(outputShape.extraParams).data());
return compute(n, outputQuant->scale, outputQuant->zeroPoint, output);
}
}
@@ -142,14 +140,14 @@
static OperationRegistration operationRegistration(operationType, fibonacci_op::kOperationName,
nullptr, fibonacci_op::prepare,
fibonacci_op::execute, {});
- uint16_t prefix = static_cast<int32_t>(operationType) >> kLowBitsType;
+ uint16_t prefix = static_cast<int32_t>(operationType) >> kExtensionTypeBits;
uint16_t typeWithinExtension = static_cast<int32_t>(operationType) & kTypeWithinExtensionMask;
// Assumes no other extensions in use.
return prefix != 0 && typeWithinExtension == EXAMPLE_FIBONACCI ? &operationRegistration
: nullptr;
}
-Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
+hardware::Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
cb(V1_0::ErrorStatus::NONE,
{
{
@@ -169,44 +167,44 @@
},
},
});
- return Void();
+ return hardware::Void();
}
-Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- static const PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
- Capabilities capabilities = {
+ static const V1_0::PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
-Return<void> FibonacciDriver::getSupportedOperations_1_3(const V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) {
+hardware::Return<void> FibonacciDriver::getSupportedOperations_1_3(
+ const V1_3::Model& model, getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (!validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; ++i) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (fibonacci_op::isFibonacciOperation(operation, model)) {
if (!fibonacci_op::validate(operation, model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
supported[i] = true;
}
}
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
} // namespace sample_driver
diff --git a/runtime/test/fibonacci_extension/FibonacciDriver.h b/runtime/test/fibonacci_extension/FibonacciDriver.h
index 303edd8..7daf4d2 100644
--- a/runtime/test/fibonacci_extension/FibonacciDriver.h
+++ b/runtime/test/fibonacci_extension/FibonacciDriver.h
@@ -34,7 +34,7 @@
return &instance;
}
- const OperationRegistration* findOperation(hal::OperationType operationType) const override;
+ const OperationRegistration* findOperation(OperationType operationType) const override;
private:
FibonacciOperationResolver() {}
@@ -45,10 +45,10 @@
class FibonacciDriver : public SampleDriver {
public:
FibonacciDriver() : SampleDriver(kDriverName, FibonacciOperationResolver::get()) {}
- hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
- hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
- hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) override;
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
static constexpr char kDriverName[] = "sample-driver-fibonacci-extension";
};
diff --git a/runtime/test/fuzzing/RandomGraphGenerator.cpp b/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 9799ca0..8ba763a 100644
--- a/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -44,7 +44,7 @@
RandomOperand::RandomOperand(const OperandSignature& operand, TestOperandType dataType,
uint32_t rank)
: type(operand.type), finalizer(operand.finalizer) {
- NN_FUZZER_LOG << "Operand: " << toString(type);
+ NN_FUZZER_LOG << "Operand: " << type;
if (operand.constructor) operand.constructor(dataType, rank, this);
}
@@ -81,7 +81,7 @@
// Construct a RandomOperation from OperationSignature.
RandomOperation::RandomOperation(const OperationSignature& operation)
: opType(operation.opType), finalizer(operation.finalizer) {
- NN_FUZZER_LOG << "Operation: " << toString(opType);
+ NN_FUZZER_LOG << "Operation: " << opType;
// Determine the data type and rank of the operation and invoke the constructor.
TestOperandType dataType = getRandomChoice(operation.supportedDataTypes);
@@ -294,14 +294,14 @@
// Set model operations.
for (auto& operation : mOperations) {
- NN_FUZZER_LOG << "Operation: " << toString(operation.opType);
+ NN_FUZZER_LOG << "Operation: " << operation.opType;
TestOperation testOperation = {.type = static_cast<TestOperationType>(operation.opType)};
for (auto& op : operation.inputs) {
- NN_FUZZER_LOG << toString(*op);
+ NN_FUZZER_LOG << *op;
testOperation.inputs.push_back(op->opIndex);
}
for (auto& op : operation.outputs) {
- NN_FUZZER_LOG << toString(*op);
+ NN_FUZZER_LOG << *op;
testOperation.outputs.push_back(op->opIndex);
}
testModel.main.operations.push_back(std::move(testOperation));
diff --git a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index 1aa7fea..8faae12 100644
--- a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -119,18 +119,13 @@
};
template <typename T>
-inline std::string toString(const T& obj) {
- return std::to_string(obj);
-}
-
-template <typename T>
inline std::string joinStr(const std::string& joint, const std::vector<T>& items) {
std::stringstream ss;
for (uint32_t i = 0; i < items.size(); i++) {
if (i == 0) {
- ss << toString(items[i]);
+ ss << items[i];
} else {
- ss << joint << toString(items[i]);
+ ss << joint << items[i];
}
}
return ss.str();
@@ -150,18 +145,15 @@
inline std::string joinStr(const std::string& joint, int limit, const std::vector<T>& items) {
if (items.size() > static_cast<size_t>(limit)) {
std::vector<T> topMax(items.begin(), items.begin() + limit);
- return joinStr(joint, topMax) + ", (" + toString(items.size() - limit) + " ommited), " +
- toString(items.back());
+ std::stringstream ss;
+ ss << joinStr(joint, topMax) << ", (" << (items.size() - limit) << " omitted), "
+ << items.back();
+ return ss.str();
} else {
return joinStr(joint, items);
}
}
-static const char* kLifeTimeNames[6] = {
- "TEMPORARY_VARIABLE", "SUBGRAPH_INPUT", "SUBGRAPH_OUTPUT",
- "CONSTANT_COPY", "CONSTANT_REFERENCE", "NO_VALUE",
-};
-
static const bool kScalarDataType[]{
true, // ANEURALNETWORKS_FLOAT32
true, // ANEURALNETWORKS_INT32
@@ -198,10 +190,9 @@
1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
-template <>
-inline std::string toString<RandomVariableType>(const RandomVariableType& type) {
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableType& type) {
static const std::string typeNames[] = {"FREE", "CONST", "OP"};
- return typeNames[static_cast<int>(type)];
+ return os << typeNames[static_cast<int>(type)];
}
inline std::string alignedString(std::string str, int width) {
@@ -210,51 +201,45 @@
return str;
}
-template <>
-inline std::string toString<RandomVariableRange>(const RandomVariableRange& range) {
- return "[" + joinStr(", ", 20, range.getChoices()) + "]";
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableRange& range) {
+ return os << "[" + joinStr(", ", 20, range.getChoices()) + "]";
}
-template <>
-inline std::string toString<RandomOperandType>(const RandomOperandType& type) {
+inline std::ostream& operator<<(std::ostream& os, const RandomOperandType& type) {
static const std::string typeNames[] = {"Input", "Output", "Internal", "Parameter", "No Value"};
- return typeNames[static_cast<int>(type)];
+ return os << typeNames[static_cast<int>(type)];
}
-template <>
-inline std::string toString<RandomVariableNode>(const RandomVariableNode& var) {
- std::stringstream ss;
- ss << "var" << var->index << " = ";
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableNode& var) {
+ os << "var" << var->index << " = ";
switch (var->type) {
case RandomVariableType::FREE:
- ss << "FREE " << toString(var->range);
+ os << "FREE " << var->range;
break;
case RandomVariableType::CONST:
- ss << "CONST " << toString(var->value);
+ os << "CONST " << var->value;
break;
case RandomVariableType::OP:
- ss << "var" << var->parent1->index << " " << var->op->getName();
- if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
- ss << ", " << toString(var->range);
+ os << "var" << var->parent1->index << " " << var->op->getName();
+ if (var->parent2 != nullptr) os << " var" << var->parent2->index;
+ os << ", " << var->range;
break;
default:
NN_FUZZER_CHECK(false);
}
- ss << ", timestamp = " << var->timestamp;
- return ss.str();
+ os << ", timestamp = " << var->timestamp;
+ return os;
}
-template <>
-inline std::string toString<RandomVariable>(const RandomVariable& var) {
- return "var" + std::to_string(var.get()->index);
+inline std::ostream& operator<<(std::ostream& os, const RandomVariable& var) {
+ return os << "var" + std::to_string(var.get()->index);
}
-template <>
-inline std::string toString<RandomOperand>(const RandomOperand& op) {
- return toString(op.type) + ", dimension = [" +
- joinStr(", ", op.dimensions,
- [](const RandomVariable& var) { return std::to_string(var.getValue()); }) +
- "], scale = " + toString(op.scale) + " , zero_point = " + toString(op.zeroPoint);
+inline std::ostream& operator<<(std::ostream& os, const RandomOperand& op) {
+ return os << op.type << ", dimension = ["
+ << joinStr(", ", op.dimensions,
+ [](const RandomVariable& var) { return std::to_string(var.getValue()); })
+ << "], scale = " << op.scale << " , zero_point = " << op.zeroPoint;
}
// This class is a workaround for two issues our code relies on:
diff --git a/runtime/test/fuzzing/RandomVariable.cpp b/runtime/test/fuzzing/RandomVariable.cpp
index d3f6ef7..f1067e1 100644
--- a/runtime/test/fuzzing/RandomVariable.cpp
+++ b/runtime/test/fuzzing/RandomVariable.cpp
@@ -1,1225 +1,1225 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "RandomVariable.h"
-
-#include <algorithm>
-#include <memory>
-#include <set>
-#include <string>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "RandomGraphGeneratorUtils.h"
-
-namespace android {
-namespace nn {
-namespace fuzzing_test {
-
-unsigned int RandomVariableBase::globalIndex = 0;
-int RandomVariable::defaultValue = 10;
-
-RandomVariableBase::RandomVariableBase(int value)
- : index(globalIndex++),
- type(RandomVariableType::CONST),
- range(value),
- value(value),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(int lower, int upper)
- : index(globalIndex++),
- type(RandomVariableType::FREE),
- range(lower, upper),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(const std::vector<int>& choices)
- : index(globalIndex++),
- type(RandomVariableType::FREE),
- range(choices),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs,
- const std::shared_ptr<const IRandomVariableOp>& op)
- : index(globalIndex++),
- type(RandomVariableType::OP),
- range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)),
- op(op),
- parent1(lhs),
- parent2(rhs),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-void RandomVariableRange::setRange(int lower, int upper) {
- // kInvalidValue indicates unlimited bound.
- auto head = lower == kInvalidValue ? mChoices.begin()
- : std::lower_bound(mChoices.begin(), mChoices.end(), lower);
- auto tail = upper == kInvalidValue ? mChoices.end()
- : std::upper_bound(mChoices.begin(), mChoices.end(), upper);
- NN_FUZZER_CHECK(head <= tail) << "Invalid range!";
- if (head != mChoices.begin() || tail != mChoices.end()) {
- mChoices = std::vector<int>(head, tail);
- }
-}
-
-int RandomVariableRange::toConst() {
- if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)};
- return mChoices[0];
-}
-
-RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) {
- std::vector<int> result(lhs.size() + rhs.size());
- auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(),
- rhs.mChoices.end(), result.begin());
- result.resize(it - result.begin());
- return RandomVariableRange(std::move(result));
-}
-
-void RandomVariableBase::freeze() {
- if (type == RandomVariableType::CONST) return;
- value = range.toConst();
- type = RandomVariableType::CONST;
-}
-
-int RandomVariableBase::getValue() const {
- switch (type) {
- case RandomVariableType::CONST:
- return value;
- case RandomVariableType::OP:
- return op->eval(parent1->getValue(), parent2 == nullptr ? 0 : parent2->getValue());
- default:
- NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index;
- return 0;
- }
-}
-
-void RandomVariableBase::updateTimestamp() {
- timestamp = RandomVariableNetwork::get()->getGlobalTime();
- NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp;
-}
-
-RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(const std::vector<int>& choices)
- : mVar(new RandomVariableBase(choices)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(RandomVariableType type)
- : mVar(new RandomVariableBase(1, defaultValue)) {
- NN_FUZZER_CHECK(type == RandomVariableType::FREE);
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs,
- const std::shared_ptr<const IRandomVariableOp>& op)
- : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) {
- // Make a copy if the parent is CONST. This will resolve the fake dependency problem.
- if (mVar->parent1->type == RandomVariableType::CONST) {
- mVar->parent1 = RandomVariable(mVar->parent1->value).get();
- }
- if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) {
- mVar->parent2 = RandomVariable(mVar->parent2->value).get();
- }
- mVar->parent1->children.push_back(mVar);
- if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar);
- RandomVariableNetwork::get()->add(mVar);
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
-}
-
-void RandomVariable::setRange(int lower, int upper) {
- NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr";
- NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index;
- size_t oldSize = mVar->range.size();
- mVar->range.setRange(lower, upper);
- // Only update the timestamp if the range is *indeed* narrowed down.
- if (mVar->range.size() != oldSize) mVar->updateTimestamp();
-}
-
-RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const {
- std::set<int> st;
- for (auto i : lhs.getChoices()) {
- for (auto j : rhs.getChoices()) {
- int res = this->eval(i, j);
- if (res > kMaxValue || res < -kMaxValue) continue;
- st.insert(res);
- }
- }
- return RandomVariableRange(st);
-}
-
-// Check if the range contains exactly all values in [min, max].
-static inline bool isContinuous(const std::set<int>* range) {
- return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size());
-}
-
-// Fill the set with a range of values specified by [lower, upper].
-static inline void fillRange(std::set<int>* range, int lower, int upper) {
- for (int i = lower; i <= upper; i++) range->insert(i);
-}
-
-// The slowest algorithm: iterate through every combinations of parents and save the valid pairs.
-void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const {
- // Avoid the binary search if the child is a closed range.
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- int res = this->eval(i, j);
- // Avoid the binary search if obviously out of range.
- if (res > child.second || res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- parent2Out->insert(j);
- childOut->insert(res);
- valid = true;
- }
- }
- if (valid) parent1Out->insert(i);
- }
-}
-
-// A helper template to make a class into a Singleton.
-template <class T>
-class Singleton : public T {
- public:
- static const std::shared_ptr<const T>& get() {
- static std::shared_ptr<const T> instance(new T);
- return instance;
- }
-};
-
-// A set of operations that only compute on a single input value.
-class IUnaryOp : public IRandomVariableOp {
- public:
- using IRandomVariableOp::eval;
- virtual int eval(int val) const = 0;
- virtual int eval(int lhs, int) const override { return eval(lhs); }
- // The slowest algorithm: iterate through every value of the parent and save the valid one.
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- NN_FUZZER_CHECK(parent2In == nullptr);
- NN_FUZZER_CHECK(parent2Out == nullptr);
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- int res = this->eval(i);
- if (res > child.second || res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- parent1Out->insert(i);
- childOut->insert(res);
- }
- }
- }
-};
-
-// A set of operations that only check conditional constraints.
-class IConstraintOp : public IRandomVariableOp {
- public:
- using IRandomVariableOp::eval;
- virtual bool check(int lhs, int rhs) const = 0;
- virtual int eval(int lhs, int rhs) const override {
- return check(lhs, rhs) ? 0 : kInvalidValue;
- }
- // The range for a constraint op is always {0}.
- virtual RandomVariableRange getInitRange(const RandomVariableRange&,
- const RandomVariableRange&) const override {
- return RandomVariableRange(0);
- }
- // The slowest algorithm:
- // iterate through every combinations of parents and save the valid pairs.
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out,
- std::set<int>* childOut) const override {
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- if (this->check(i, j)) {
- parent2Out->insert(j);
- valid = true;
- }
- }
- if (valid) parent1Out->insert(i);
- }
- if (!parent1Out->empty()) childOut->insert(0);
- }
-};
-
-class Addition : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs + rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // For parents and child with close range, the out range can be computed directly
- // without iterations.
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
- std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
-
- // From ranges for parent, evalute range for child.
- // [a, b] + [c, d] -> [a + c, b + d]
- fillRange(childOut, std::max(child.first, parent1.first + parent2.first),
- std::min(child.second, parent1.second + parent2.second));
-
- // From ranges for child and one parent, evalute range for another parent.
- // [a, b] - [c, d] -> [a - d, b - c]
- fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second),
- std::min(parent1.second, child.second - parent2.first));
- fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second),
- std::min(parent2.second, child.second - parent1.first));
- }
- }
- virtual const char* getName() const override { return "ADD"; }
-};
-
-class Subtraction : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs - rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // Similar algorithm as Addition.
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
- std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- fillRange(childOut, std::max(child.first, parent1.first - parent2.second),
- std::min(child.second, parent1.second - parent2.first));
- fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first),
- std::min(parent1.second, child.second + parent2.second));
- fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second),
- std::min(parent2.second, parent1.second - child.first));
- }
- }
- virtual const char* getName() const override { return "SUB"; }
-};
-
-class Multiplication : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs * rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- if (lhs.min() < 0 || rhs.min() < 0) {
- return IRandomVariableOp::getInitRange(lhs, rhs);
- } else {
- int lower = std::min(lhs.min() * rhs.min(), kMaxValue);
- int upper = std::min(lhs.max() * rhs.max(), kMaxValue);
- return RandomVariableRange(lower, upper);
- }
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- int res = this->eval(i, j);
- // Since MUL increases monotonically with one value, break the loop if the
- // result is larger than the limit.
- if (res > child.second) break;
- if (res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- valid = true;
- parent2Out->insert(j);
- childOut->insert(res);
- }
- }
- if (valid) parent1Out->insert(i);
- }
- }
- }
- virtual const char* getName() const override { return "MUL"; }
-};
-
-class Division : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return rhs == 0 ? kInvalidValue : lhs / rhs;
- }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- if (lhs.min() < 0 || rhs.min() <= 0) {
- return IRandomVariableOp::getInitRange(lhs, rhs);
- } else {
- return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min());
- }
- }
- virtual const char* getName() const override { return "DIV"; }
-};
-
-class ExactDivision : public Division {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs;
- }
- virtual const char* getName() const override { return "EXACT_DIV"; }
-};
-
-class Modulo : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return rhs == 0 ? kInvalidValue : lhs % rhs;
- }
- virtual RandomVariableRange getInitRange(const RandomVariableRange&,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(0, rhs.max());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (*childIn->begin() != 0 || childIn->size() != 1u) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // For the special case that child is a const 0, it would be faster if the range for
- // parents are evaluated separately.
-
- // Evalute parent1 directly.
- for (auto i : *parent1In) {
- for (auto j : *parent2In) {
- if (i % j == 0) {
- parent1Out->insert(i);
- break;
- }
- }
- }
- // Evalute parent2, see if a multiple of parent2 value can be found in parent1.
- int parent1Max = *parent1In->rbegin();
- for (auto i : *parent2In) {
- int jMax = parent1Max / i;
- for (int j = 1; j <= jMax; j++) {
- if (parent1In->find(i * j) != parent1In->end()) {
- parent2Out->insert(i);
- break;
- }
- }
- }
- if (!parent1Out->empty()) childOut->insert(0);
- }
- }
- virtual const char* getName() const override { return "MOD"; }
-};
-
-class Maximum : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); }
- virtual const char* getName() const override { return "MAX"; }
-};
-
-class Minimum : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); }
- virtual const char* getName() const override { return "MIN"; }
-};
-
-class Square : public IUnaryOp {
- public:
- virtual int eval(int val) const override { return val * val; }
- virtual const char* getName() const override { return "SQUARE"; }
-};
-
-class UnaryEqual : public IUnaryOp {
- public:
- virtual int eval(int val) const override { return val; }
- virtual const char* getName() const override { return "UNARY_EQUAL"; }
-};
-
-class Equal : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs == rhs; }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0);
- // The intersection of two sets can be found in O(n).
- std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(),
- parent2In->end(), std::inserter(*parent1Out, parent1Out->begin()));
- *parent2Out = *parent1Out;
- childOut->insert(0);
- }
- virtual const char* getName() const override { return "EQUAL"; }
-};
-
-class GreaterThan : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs > rhs; }
- virtual const char* getName() const override { return "GREATER_THAN"; }
-};
-
-class GreaterEqual : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; }
- virtual const char* getName() const override { return "GREATER_EQUAL"; }
-};
-
-class FloatMultiplication : public IUnaryOp {
- public:
- FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {}
- virtual int eval(int val) const override {
- return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand));
- }
- virtual const char* getName() const override { return "MUL_FLOAT"; }
-
- private:
- float mMultiplicand;
-};
-
-// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes.
-// Since there must be at most one edge between two RandomVariableNodes, we have to do something
-// special when both sides are refering to the same node.
-
-RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get())
- : RandomVariable(lhs, rhs, Singleton<Addition>::get());
-}
-RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(0)
- : RandomVariable(lhs, rhs, Singleton<Subtraction>::get());
-}
-RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(lhs, RandomVariable(), Singleton<Square>::get())
- : RandomVariable(lhs, rhs, Singleton<Multiplication>::get());
-}
-RandomVariable operator*(const RandomVariable& lhs, const float& rhs) {
- return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs));
-}
-RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(1)
- : RandomVariable(lhs, rhs, Singleton<Division>::get());
-}
-RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(0)
- : RandomVariable(lhs, rhs, Singleton<Modulo>::get());
-}
-RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get());
-}
-RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get());
-}
-
-RandomVariable RandomVariable::exactDiv(const RandomVariable& other) {
- return mVar == other.get() ? RandomVariable(1)
- : RandomVariable(*this, other, Singleton<ExactDivision>::get());
-}
-
-RandomVariable RandomVariable::setEqual(const RandomVariable& other) const {
- RandomVariableNode node1 = mVar, node2 = other.get();
- NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index;
-
- // Do not setEqual on the same pair twice.
- if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) ||
- (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) {
- NN_FUZZER_LOG << "Already equal. Return.";
- return RandomVariable();
- }
-
- // If possible, always try UnaryEqual first to reduce the search space.
- // UnaryEqual can be used if node B is FREE and is evaluated later than node A.
- // TODO: Reduce code duplication.
- if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) {
- NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index;
- node2->type = RandomVariableType::OP;
- node2->parent1 = node1;
- node2->op = Singleton<UnaryEqual>::get();
- node1->children.push_back(node2);
- RandomVariableNetwork::get()->join(node1, node2);
- node1->updateTimestamp();
- return other;
- }
- if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) {
- NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index;
- node1->type = RandomVariableType::OP;
- node1->parent1 = node2;
- node1->op = Singleton<UnaryEqual>::get();
- node2->children.push_back(node1);
- RandomVariableNetwork::get()->join(node2, node1);
- node1->updateTimestamp();
- return *this;
- }
- return RandomVariable(*this, other, Singleton<Equal>::get());
-}
-
-RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const {
- NN_FUZZER_CHECK(mVar != other.get());
- return RandomVariable(*this, other, Singleton<GreaterThan>::get());
-}
-RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const {
- return mVar == other.get() ? *this
- : RandomVariable(*this, other, Singleton<GreaterEqual>::get());
-}
-
-void DisjointNetwork::add(const RandomVariableNode& var) {
- // Find the subnet index of the parents and decide the index for var.
- int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1];
- int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2];
- int ind = join(ind1, ind2);
- // If no parent, put it into a new subnet component.
- if (ind == -1) ind = mNextIndex++;
- NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind;
- mIndexMap[var] = ind;
- mEvalOrderMap[ind].push_back(var);
-}
-
-int DisjointNetwork::join(int ind1, int ind2) {
- if (ind1 == -1) return ind2;
- if (ind2 == -1) return ind1;
- if (ind1 == ind2) return ind1;
- NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2;
- auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2];
- // Append every node in ind2 to the end of ind1
- for (const auto& var : order2) {
- order1.push_back(var);
- mIndexMap[var] = ind1;
- }
- // Remove ind2 from mEvalOrderMap.
- mEvalOrderMap.erase(mEvalOrderMap.find(ind2));
- return ind1;
-}
-
-RandomVariableNetwork* RandomVariableNetwork::get() {
- static RandomVariableNetwork instance;
- return &instance;
-}
-
-void RandomVariableNetwork::initialize(int defaultValue) {
- RandomVariableBase::globalIndex = 0;
- RandomVariable::defaultValue = defaultValue;
- mIndexMap.clear();
- mEvalOrderMap.clear();
- mDimProd.clear();
- mNextIndex = 0;
- mGlobalTime = 0;
- mTimestamp = -1;
-}
-
-bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1,
- const RandomVariableNode& node2) {
- if (node2->type != RandomVariableType::FREE) return false;
- int ind1 = mIndexMap[node1];
- // node2 is of a different subnet.
- if (ind1 != mIndexMap[node2]) return true;
- for (const auto& node : mEvalOrderMap[ind1]) {
- if (node == node2) return false;
- // node2 is of the same subnet but evaluated later than node1.
- if (node == node1) return true;
- }
- NN_FUZZER_CHECK(false) << "Code executed in non-reachable region.";
- return false;
-}
-
-struct EvalInfo {
- // The RandomVariableNode that this EvalInfo is associated with.
- // var->value is the current value during evaluation.
- RandomVariableNode var;
-
- // The RandomVariable value is staged when a valid combination is found.
- std::set<int> staging;
-
- // The staging values are committed after a subnet evaluation.
- std::set<int> committed;
-
- // Keeps track of the latest timestamp that committed is updated.
- int timestamp;
-
- // For evalSubnetWithLocalNetwork.
- RandomVariableType originalType;
-
- // Should only invoke eval on OP RandomVariable.
- bool eval() {
- NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
- var->value = var->op->eval(var->parent1->value,
- var->parent2 == nullptr ? 0 : var->parent2->value);
- if (var->value == kInvalidValue) return false;
- return committed.find(var->value) != committed.end();
- }
- void stage() { staging.insert(var->value); }
- void commit() {
- // Only update committed and timestamp if the range is *indeed* changed.
- if (staging.size() != committed.size()) {
- committed = std::move(staging);
- timestamp = RandomVariableNetwork::get()->getGlobalTime();
- }
- staging.clear();
- }
- void updateRange() {
- // Only update range and timestamp if the range is *indeed* changed.
- if (committed.size() != var->range.size()) {
- var->range = RandomVariableRange(committed);
- var->timestamp = timestamp;
- }
- committed.clear();
- }
-
- EvalInfo(const RandomVariableNode& var)
- : var(var),
- committed(var->range.getChoices().begin(), var->range.getChoices().end()),
- timestamp(var->timestamp) {}
-};
-using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>;
-
-// For logging only.
-inline std::string toString(const RandomVariableNode& var, EvalContext* context) {
- std::stringstream ss;
- ss << "var" << var->index << " = ";
- const auto& committed = context->at(var).committed;
- switch (var->type) {
- case RandomVariableType::FREE:
- ss << "FREE ["
- << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]";
- break;
- case RandomVariableType::CONST:
- ss << "CONST " << toString(var->value);
- break;
- case RandomVariableType::OP:
- ss << "var" << var->parent1->index << " " << var->op->getName();
- if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
- ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end()))
- << "]";
- break;
- default:
- NN_FUZZER_CHECK(false);
- }
- ss << ", timestamp = " << context->at(var).timestamp;
- return ss.str();
-}
-
-// Check if the subnet needs to be re-evaluated by comparing the timestamps.
-static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime,
- EvalContext* context = nullptr) {
- for (const auto& var : evalOrder) {
- int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp;
- // If we find a node that has been modified since last evaluation, the subnet needs to be
- // re-evaluated.
- if (timestamp > subnetTime) return true;
- }
- return false;
-}
-
-// Helper function to evaluate the subnet recursively.
-// Iterate through all combinations of FREE RandomVariables choices.
-static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) {
- if (i == evalOrder.size()) {
- // Reach the end of the evaluation, find a valid combination.
- for (auto& var : evalOrder) context->at(var).stage();
- return;
- }
- const auto& var = evalOrder[i];
- if (var->type == RandomVariableType::FREE) {
- // For FREE RandomVariable, iterate through all valid choices.
- for (int val : context->at(var).committed) {
- var->value = val;
- evalSubnetHelper(evalOrder, context, i + 1);
- }
- return;
- } else if (var->type == RandomVariableType::OP) {
- // For OP RandomVariable, evaluate from parents and terminate if the result is invalid.
- if (!context->at(var).eval()) return;
- }
- evalSubnetHelper(evalOrder, context, i + 1);
-}
-
-// Check if the subnet has only one single OP RandomVariable.
-static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) {
- int numOp = 0;
- for (const auto& var : evalOrder) {
- if (var->type == RandomVariableType::OP) numOp++;
- if (numOp > 1) return false;
- }
- return numOp != 0;
-}
-
-// Evaluate with a potentially faster approach provided by IRandomVariableOp.
-static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder,
- EvalContext* context) {
- NN_FUZZER_LOG << "Identified as single op subnet";
- const auto& var = evalOrder.back();
- NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
- var->op->eval(&context->at(var->parent1).committed,
- var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed,
- &context->at(var).committed, &context->at(var->parent1).staging,
- var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging,
- &context->at(var).staging);
-}
-
-// Check if the number of combinations of FREE RandomVariables exceeds the limit.
-static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder,
- EvalContext* context = nullptr) {
- constexpr uint64_t kLimit = 1e8;
- uint64_t numCombinations = 1;
- for (const auto& var : evalOrder) {
- if (var->type == RandomVariableType::FREE) {
- size_t size =
- context == nullptr ? var->range.size() : context->at(var).committed.size();
- numCombinations *= size;
- // To prevent overflow.
- if (numCombinations > kLimit) return kLimit;
- }
- }
- return numCombinations;
-}
-
-// Evaluate the subnet recursively. Will return fail if the number of combinations of FREE
-// RandomVariable exceeds the threshold kMaxNumCombinations.
-static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) {
- constexpr uint64_t kMaxNumCombinations = 1e7;
- NN_FUZZER_LOG << "Evaluate with brute force";
- if (isSingleOpSubnet(evalOrder)) {
- // If the network only have one single OP, dispatch to a faster evaluation.
- evalSubnetSingleOpHelper(evalOrder, context);
- } else {
- if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) {
- NN_FUZZER_LOG << "Terminate the evaluation because of large search range";
- std::cout << "[ ] Terminate the evaluation because of large search range"
- << std::endl;
- return false;
- }
- evalSubnetHelper(evalOrder, context);
- }
- for (auto& var : evalOrder) {
- if (context->at(var).staging.empty()) {
- NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context);
- return false;
- }
- context->at(var).commit();
- }
- return true;
-}
-
-struct LocalNetwork {
- EvaluationOrder evalOrder;
- std::vector<RandomVariableNode> bridgeNodes;
- int timestamp = 0;
-
- bool eval(EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp;
- // Temporarily treat bridge nodes as FREE RandomVariables.
- for (const auto& var : bridgeNodes) {
- context->at(var).originalType = var->type;
- var->type = RandomVariableType::FREE;
- }
- for (const auto& var : evalOrder) {
- context->at(var).staging.clear();
- NN_FUZZER_LOG << " - " << toString(var, context);
- }
- bool success = evalSubnetWithBruteForce(evalOrder, context);
- // Reset the RandomVariable types for bridge nodes.
- for (const auto& var : bridgeNodes) var->type = context->at(var).originalType;
- return success;
- }
-};
-
-// Partition the network further into LocalNetworks based on the result from bridge annotation
-// algorithm.
-class GraphPartitioner : public DisjointNetwork {
- public:
- GraphPartitioner() = default;
-
- std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) {
- annotateBridge(evalOrder);
- for (const auto& var : evalOrder) add(var);
- return get(timestamp);
- }
-
- private:
- GraphPartitioner(const GraphPartitioner&) = delete;
- GraphPartitioner& operator=(const GraphPartitioner&) = delete;
-
- // Find the parent-child relationship between var1 and var2, and reset the bridge.
- void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) {
- if (var1->parent1 == var2) {
- mBridgeInfo[var1].isParent1Bridge = true;
- } else if (var1->parent2 == var2) {
- mBridgeInfo[var1].isParent2Bridge = true;
- } else {
- setBridgeFlag(var2, var1);
- }
- }
-
- // Annoate the bridges with DFS -- an edge [u, v] is a bridge if none of u's ancestor is
- // reachable from a node in the subtree of b. The complexity is O(V + E).
- // discoveryTime: The timestamp a node is visited
- // lowTime: The min discovery time of all reachable nodes from the subtree of the node.
- void annotateBridgeHelper(const RandomVariableNode& var, int* time) {
- mBridgeInfo[var].visited = true;
- mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++;
-
- // The algorithm operates on undirected graph. First find all adjacent nodes.
- auto adj = var->children;
- if (var->parent1 != nullptr) adj.push_back(var->parent1);
- if (var->parent2 != nullptr) adj.push_back(var->parent2);
-
- for (const auto& weakChild : adj) {
- auto child = weakChild.lock();
- NN_FUZZER_CHECK(child != nullptr);
- if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue;
- if (!mBridgeInfo[child].visited) {
- mBridgeInfo[child].parent = var;
- annotateBridgeHelper(child, time);
-
- // If none of nodes in the subtree of child is connected to any ancestors of var,
- // then it is a bridge.
- mBridgeInfo[var].lowTime =
- std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime);
- if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime)
- setBridgeFlag(var, child);
- } else if (mBridgeInfo[var].parent != child) {
- mBridgeInfo[var].lowTime =
- std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime);
- }
- }
- }
-
- // Find all bridges in the subnet with DFS.
- void annotateBridge(const EvaluationOrder& evalOrder) {
- for (const auto& var : evalOrder) mBridgeInfo[var];
- int time = 0;
- for (const auto& var : evalOrder) {
- if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time);
- }
- }
-
- // Re-partition the network by treating bridges as no edge.
- void add(const RandomVariableNode& var) {
- auto parent1 = var->parent1;
- auto parent2 = var->parent2;
- if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr;
- if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr;
- DisjointNetwork::add(var);
- var->parent1 = parent1;
- var->parent2 = parent2;
- }
-
- // Add bridge nodes to the local network and remove single node subnet.
- std::vector<LocalNetwork> get(int timestamp) {
- std::vector<LocalNetwork> res;
- for (auto& pair : mEvalOrderMap) {
- // We do not need to evaluate subnet with only a single node.
- if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue;
- res.emplace_back();
- for (const auto& var : pair.second) {
- if (mBridgeInfo[var].isParent1Bridge) {
- res.back().evalOrder.push_back(var->parent1);
- res.back().bridgeNodes.push_back(var->parent1);
- }
- if (mBridgeInfo[var].isParent2Bridge) {
- res.back().evalOrder.push_back(var->parent2);
- res.back().bridgeNodes.push_back(var->parent2);
- }
- res.back().evalOrder.push_back(var);
- }
- res.back().timestamp = timestamp;
- }
- return res;
- }
-
- // For bridge discovery algorithm.
- struct BridgeInfo {
- bool isParent1Bridge = false;
- bool isParent2Bridge = false;
- int discoveryTime = 0;
- int lowTime = 0;
- bool visited = false;
- std::shared_ptr<RandomVariableBase> parent = nullptr;
- };
- std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo;
-};
-
-// Evaluate subnets repeatedly until converge.
-// Class T_Subnet must have member evalOrder, timestamp, and member function eval.
-template <class T_Subnet>
-inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) {
- bool terminate = false;
- while (!terminate) {
- terminate = true;
- for (auto& subnet : *subnets) {
- if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) {
- if (!subnet.eval(context)) return false;
- subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime();
- terminate = false;
- }
- }
- }
- return true;
-}
-
-// Evaluate the subnet by first partitioning it further into LocalNetworks.
-static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp,
- EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate with local network";
- auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp);
- return evalSubnetsRepeatedly(&localNetworks, context);
-}
-
-struct LeafNetwork {
- EvaluationOrder evalOrder;
- int timestamp = 0;
- LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) {
- std::set<RandomVariableNode> visited;
- constructorHelper(var, &visited);
- }
- // Construct the leaf network by recursively including parent nodes.
- void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) {
- if (var == nullptr || visited->find(var) != visited->end()) return;
- constructorHelper(var->parent1, visited);
- constructorHelper(var->parent2, visited);
- visited->insert(var);
- evalOrder.push_back(var);
- }
- bool eval(EvalContext* context) {
- return evalSubnetWithLocalNetwork(evalOrder, timestamp, context);
- }
-};
-
-// Evaluate the subnet by leaf network.
-// NOTE: This algorithm will only produce correct result for *most* of the time (> 99%).
-// The random graph generator is expected to retry if it fails.
-static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp,
- EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate with leaf network";
- // Construct leaf networks.
- std::vector<LeafNetwork> leafNetworks;
- for (const auto& var : evalOrder) {
- if (var->children.empty()) {
- NN_FUZZER_LOG << "Found leaf " << toString(var, context);
- leafNetworks.emplace_back(var, timestamp);
- }
- }
- return evalSubnetsRepeatedly(&leafNetworks, context);
-}
-
-void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) {
- if (dims.size() <= 1) return;
- EvaluationOrder order;
- for (const auto& dim : dims) order.push_back(dim.get());
- mDimProd.push_back(order);
-}
-
-bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd,
- const std::unordered_map<RandomVariableNode, int>& indexMap,
- EvalContext* context, std::set<int>* dirtySubnets) {
- for (auto& evalOrder : mDimProd) {
- NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size();
- // Initialize EvalInfo of each RandomVariable.
- for (auto& var : evalOrder) {
- if (context->find(var) == context->end()) context->emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var, context);
- }
-
- // Enforce the product of the dimension values below kMaxValue:
- // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...)
- int prod = 1;
- for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin());
- for (auto& var : evalOrder) {
- auto& committed = context->at(var).committed;
- int maxValue = kMaxValue / (prod / *committed.begin());
- auto it = committed.upper_bound(maxValue);
- // var has empty range -> no solution.
- if (it == committed.begin()) return false;
- // The range is not modified -> continue.
- if (it == committed.end()) continue;
- // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation.
- committed.erase(it, committed.end());
- context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime();
- dirtySubnets->insert(indexMap.at(var));
- }
- }
- return true;
-}
-
-bool RandomVariableNetwork::evalRange() {
- constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500;
- constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5;
- NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks";
- EvalContext context;
- std::set<int> dirtySubnets; // Which subnets needs evaluation.
- for (auto& pair : mEvalOrderMap) {
- const auto& evalOrder = pair.second;
- // Decide whether needs evaluation by timestamp -- if no range has changed after the last
- // evaluation, then the subnet does not need re-evaluation.
- if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue;
- dirtySubnets.insert(pair.first);
- }
- if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
-
- // Repeat until the ranges converge.
- while (!dirtySubnets.empty()) {
- for (int ind : dirtySubnets) {
- const auto& evalOrder = mEvalOrderMap[ind];
- NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size();
-
- // Initialize EvalInfo of each RandomVariable.
- for (auto& var : evalOrder) {
- if (context.find(var) == context.end()) context.emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var, &context);
- }
-
- // Dispatch to different algorithm according to search range.
- bool success;
- uint64_t numCombinations = getNumCombinations(evalOrder);
- if (numCombinations <= kMaxNumCombinationsWithBruteForce) {
- success = evalSubnetWithBruteForce(evalOrder, &context);
- } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) {
- success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context);
- } else {
- success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context);
- }
- if (!success) return false;
- }
- dirtySubnets.clear();
- if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
- }
- // A successful evaluation, update RandomVariables from EvalContext.
- for (auto& pair : context) pair.second.updateRange();
- mTimestamp = getGlobalTime();
- NN_FUZZER_LOG << "Finish range evaluation";
- return true;
-}
-
-static void unsetEqual(const RandomVariableNode& node) {
- if (node == nullptr) return;
- NN_FUZZER_LOG << "Unset equality of var" << node->index;
- auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) {
- return ptr.lock() == node;
- };
- RandomVariableNode parent1 = node->parent1, parent2 = node->parent2;
- parent1->children.erase(
- std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual));
- node->parent1 = nullptr;
- if (parent2 != nullptr) {
- // For Equal.
- parent2->children.erase(
- std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual));
- node->parent2 = nullptr;
- } else {
- // For UnaryEqual.
- node->type = RandomVariableType::FREE;
- node->op = nullptr;
- }
-}
-
-// A class to revert all the changes made to RandomVariableNetwork since the Reverter object is
-// constructed. Only used when setEqualIfCompatible results in incompatible.
-class RandomVariableNetwork::Reverter {
- public:
- // Take a snapshot of RandomVariableNetwork when Reverter is constructed.
- Reverter() : mSnapshot(*RandomVariableNetwork::get()) {}
- // Add constraint (Equal) nodes to the reverter.
- void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); }
- void revert() {
- NN_FUZZER_LOG << "Revert RandomVariableNetwork";
- // Release the constraints.
- for (const auto& node : mEqualNodes) unsetEqual(node);
- // Reset all member variables.
- *RandomVariableNetwork::get() = std::move(mSnapshot);
- }
-
- private:
- Reverter(const Reverter&) = delete;
- Reverter& operator=(const Reverter&) = delete;
- RandomVariableNetwork mSnapshot;
- std::vector<RandomVariableNode> mEqualNodes;
-};
-
-bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs,
- const std::vector<RandomVariable>& rhs) {
- NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {"
- << joinStr(", ", rhs) << "}";
- if (lhs.size() != rhs.size()) return false;
- Reverter reverter;
- bool result = true;
- for (size_t i = 0; i < lhs.size(); i++) {
- auto node = lhs[i].setEqual(rhs[i]).get();
- reverter.addNode(node);
- // Early terminate if there is no common choice between two ranges.
- if (node != nullptr && node->range.empty()) result = false;
- }
- result = result && evalRange();
- if (!result) reverter.revert();
- NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]");
- return result;
-}
-
-bool RandomVariableNetwork::freeze() {
- NN_FUZZER_LOG << "Freeze the random network";
- if (!evalRange()) return false;
-
- std::vector<RandomVariableNode> nodes;
- for (const auto& pair : mEvalOrderMap) {
- // Find all FREE RandomVariables in the subnet.
- for (const auto& var : pair.second) {
- if (var->type == RandomVariableType::FREE) nodes.push_back(var);
- }
- }
-
- // Randomly shuffle the order, this is for a more uniform randomness.
- randomShuffle(&nodes);
-
- // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable.
- // TODO: Might be able to optimize this.
- for (const auto& var : nodes) {
- if (var->type != RandomVariableType::FREE) continue;
- size_t size = var->range.size();
- NN_FUZZER_LOG << "Freeze " << toString(var);
- var->freeze();
- NN_FUZZER_LOG << " " << toString(var);
- // There is no need to re-evaluate if the FREE RandomVariable have only one choice.
- if (size > 1) {
- var->updateTimestamp();
- if (!evalRange()) {
- NN_FUZZER_LOG << "Freeze failed at " << toString(var);
- return false;
- }
- }
- }
- NN_FUZZER_LOG << "Finish freezing the random network";
- return true;
-}
-
-} // namespace fuzzing_test
-} // namespace nn
-} // namespace android
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RandomVariable.h"
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "RandomGraphGeneratorUtils.h"
+
+namespace android {
+namespace nn {
+namespace fuzzing_test {
+
+// Monotonically increasing counter used to hand out a unique index to every
+// RandomVariableBase created (see the constructors below).
+unsigned int RandomVariableBase::globalIndex = 0;
+// Default upper bound for a FREE RandomVariable constructed without an explicit
+// range: RandomVariable(RandomVariableType) uses [1, defaultValue].
+int RandomVariable::defaultValue = 10;
+
+// CONST variable: range and value are pinned to a single known integer.
+RandomVariableBase::RandomVariableBase(int value)
+    : index(globalIndex++),
+      type(RandomVariableType::CONST),
+      range(value),
+      value(value),
+      timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+// FREE variable over the closed interval [lower, upper].
+RandomVariableBase::RandomVariableBase(int lower, int upper)
+    : index(globalIndex++),
+      type(RandomVariableType::FREE),
+      range(lower, upper),
+      timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+// FREE variable over an explicit list of candidate values.
+RandomVariableBase::RandomVariableBase(const std::vector<int>& choices)
+    : index(globalIndex++),
+      type(RandomVariableType::FREE),
+      range(choices),
+      timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+// OP variable: derived from one or two parents through `op`. A unary op passes
+// rhs == nullptr; a dummy single-value range {0} stands in for the missing rhs
+// when seeding the initial range.
+RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs,
+                                       const std::shared_ptr<const IRandomVariableOp>& op)
+    : index(globalIndex++),
+      type(RandomVariableType::OP),
+      range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)),
+      op(op),
+      parent1(lhs),
+      parent2(rhs),
+      timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+// Narrow the sorted choice list to [lower, upper]. Either bound may be
+// kInvalidValue, meaning "unbounded on that side". mChoices is only
+// reallocated when the range actually shrinks.
+void RandomVariableRange::setRange(int lower, int upper) {
+    // kInvalidValue indicates unlimited bound.
+    auto head = lower == kInvalidValue ? mChoices.begin()
+                                       : std::lower_bound(mChoices.begin(), mChoices.end(), lower);
+    auto tail = upper == kInvalidValue ? mChoices.end()
+                                       : std::upper_bound(mChoices.begin(), mChoices.end(), upper);
+    NN_FUZZER_CHECK(head <= tail) << "Invalid range!";
+    if (head != mChoices.begin() || tail != mChoices.end()) {
+        mChoices = std::vector<int>(head, tail);
+    }
+}
+
+// Collapse the range to a single value: pick one choice at random (if more
+// than one remains) and return it.
+int RandomVariableRange::toConst() {
+    if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)};
+    return mChoices[0];
+}
+
+// Intersection of two ranges. Both choice lists are kept sorted, so
+// std::set_intersection computes the result in linear time.
+RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) {
+    std::vector<int> result(lhs.size() + rhs.size());
+    auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(),
+                                    rhs.mChoices.end(), result.begin());
+    result.resize(it - result.begin());
+    return RandomVariableRange(std::move(result));
+}
+
+// Pin this variable to a single concrete value chosen from its current range,
+// turning it into a CONST. No-op if it is already CONST.
+void RandomVariableBase::freeze() {
+    if (type == RandomVariableType::CONST) return;
+    value = range.toConst();
+    type = RandomVariableType::CONST;
+}
+
+// Concrete value of this variable: the stored value for CONST, or the op
+// applied to the parents' values for OP (a missing parent2 contributes 0).
+// Calling this on a FREE variable is a programming error.
+int RandomVariableBase::getValue() const {
+    switch (type) {
+        case RandomVariableType::CONST:
+            return value;
+        case RandomVariableType::OP:
+            return op->eval(parent1->getValue(), parent2 == nullptr ? 0 : parent2->getValue());
+        default:
+            NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index;
+            return 0;
+    }
+}
+
+// Stamp this variable with the network's current global time so dependent
+// subnets know it needs re-evaluation.
+void RandomVariableBase::updateTimestamp() {
+    timestamp = RandomVariableNetwork::get()->getGlobalTime();
+    NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp;
+}
+
+// Each RandomVariable constructor wraps a freshly allocated RandomVariableBase
+// and registers the node with the global RandomVariableNetwork.
+
+// CONST variable with a fixed value.
+RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) {
+    NN_FUZZER_LOG << "New RandomVariable " << mVar;
+    RandomVariableNetwork::get()->add(mVar);
+}
+// FREE variable over [lower, upper].
+RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) {
+    NN_FUZZER_LOG << "New RandomVariable " << mVar;
+    RandomVariableNetwork::get()->add(mVar);
+}
+// FREE variable over an explicit list of choices.
+RandomVariable::RandomVariable(const std::vector<int>& choices)
+    : mVar(new RandomVariableBase(choices)) {
+    NN_FUZZER_LOG << "New RandomVariable " << mVar;
+    RandomVariableNetwork::get()->add(mVar);
+}
+// FREE variable with the default range [1, defaultValue]; only FREE is a valid
+// argument here.
+RandomVariable::RandomVariable(RandomVariableType type)
+    : mVar(new RandomVariableBase(1, defaultValue)) {
+    NN_FUZZER_CHECK(type == RandomVariableType::FREE);
+    NN_FUZZER_LOG << "New RandomVariable " << mVar;
+    RandomVariableNetwork::get()->add(mVar);
+}
+// OP variable derived from lhs (and optionally rhs) through `op`. CONST
+// parents are replaced by fresh copies so that two ops sharing the same
+// constant do not appear to share a dependency edge.
+RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs,
+                               const std::shared_ptr<const IRandomVariableOp>& op)
+    : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) {
+    // Make a copy if the parent is CONST. This will resolve the fake dependency problem.
+    if (mVar->parent1->type == RandomVariableType::CONST) {
+        mVar->parent1 = RandomVariable(mVar->parent1->value).get();
+    }
+    if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) {
+        mVar->parent2 = RandomVariable(mVar->parent2->value).get();
+    }
+    // Link the child into each parent's children list (weak references).
+    mVar->parent1->children.push_back(mVar);
+    if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar);
+    RandomVariableNetwork::get()->add(mVar);
+    NN_FUZZER_LOG << "New RandomVariable " << mVar;
+}
+
+// Narrow this variable's range to [lower, upper]; the timestamp is bumped only
+// when the range actually shrinks, so unchanged subnets are not re-evaluated.
+void RandomVariable::setRange(int lower, int upper) {
+    NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr";
+    NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index;
+    size_t oldSize = mVar->range.size();
+    mVar->range.setRange(lower, upper);
+    // Only update the timestamp if the range is *indeed* narrowed down.
+    if (mVar->range.size() != oldSize) mVar->updateTimestamp();
+}
+
+// Default initial range for an OP node: brute-force the Cartesian product of
+// both parent ranges, discarding results outside [-kMaxValue, kMaxValue].
+// Subclasses override this with closed-form ranges where possible.
+RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs,
+                                                    const RandomVariableRange& rhs) const {
+    std::set<int> st;
+    for (auto i : lhs.getChoices()) {
+        for (auto j : rhs.getChoices()) {
+            int res = this->eval(i, j);
+            if (res > kMaxValue || res < -kMaxValue) continue;
+            st.insert(res);
+        }
+    }
+    return RandomVariableRange(st);
+}
+
+// Check if the range contains exactly all values in [min, max], i.e. has no
+// holes — lets callers treat it as a closed interval instead of a set.
+static inline bool isContinuous(const std::set<int>* range) {
+    return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size());
+}
+
+// Fill the set with a range of values specified by [lower, upper] (inclusive;
+// no-op when lower > upper).
+static inline void fillRange(std::set<int>* range, int lower, int upper) {
+    for (int i = lower; i <= upper; i++) range->insert(i);
+}
+
+// The slowest algorithm: iterate through every combinations of parents and save the valid pairs.
+// A pair (i, j) is valid when eval(i, j) lands inside the child's current
+// range; valid values are accumulated into the three *Out sets.
+void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                             const std::set<int>* childIn, std::set<int>* parent1Out,
+                             std::set<int>* parent2Out, std::set<int>* childOut) const {
+    // Avoid the binary search if the child is a closed range.
+    bool isChildInContinuous = isContinuous(childIn);
+    std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+    for (auto i : *parent1In) {
+        bool valid = false;
+        for (auto j : *parent2In) {
+            int res = this->eval(i, j);
+            // Avoid the binary search if obviously out of range.
+            if (res > child.second || res < child.first) continue;
+            if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+                parent2Out->insert(j);
+                childOut->insert(res);
+                valid = true;
+            }
+        }
+        // i survives only if at least one j produced an in-range result.
+        if (valid) parent1Out->insert(i);
+    }
+}
+
+// A helper template to make a class into a Singleton.
+// get() lazily constructs one shared, immutable instance of T and returns it
+// on every subsequent call.
+template <class T>
+class Singleton : public T {
+   public:
+    static const std::shared_ptr<const T>& get() {
+        static std::shared_ptr<const T> instance(new T);
+        return instance;
+    }
+};
+
+// A set of operations that only compute on a single input value.
+// The binary eval(lhs, rhs) forwards to the unary eval(val), ignoring rhs; the
+// set-based eval requires the second-parent sets to be null.
+class IUnaryOp : public IRandomVariableOp {
+   public:
+    using IRandomVariableOp::eval;
+    virtual int eval(int val) const = 0;
+    virtual int eval(int lhs, int) const override { return eval(lhs); }
+    // The slowest algorithm: iterate through every value of the parent and save the valid one.
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>* childIn, std::set<int>* parent1Out,
+                      std::set<int>* parent2Out, std::set<int>* childOut) const override {
+        // Unary ops have no second parent.
+        NN_FUZZER_CHECK(parent2In == nullptr);
+        NN_FUZZER_CHECK(parent2Out == nullptr);
+        bool isChildInContinuous = isContinuous(childIn);
+        std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+        for (auto i : *parent1In) {
+            int res = this->eval(i);
+            if (res > child.second || res < child.first) continue;
+            if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+                parent1Out->insert(i);
+                childOut->insert(res);
+            }
+        }
+    }
+};
+
+// A set of operations that only check conditional constraints.
+// check(lhs, rhs) decides validity; eval maps valid pairs to 0 and invalid
+// ones to kInvalidValue, so a constraint node's range is always {0}.
+class IConstraintOp : public IRandomVariableOp {
+   public:
+    using IRandomVariableOp::eval;
+    virtual bool check(int lhs, int rhs) const = 0;
+    virtual int eval(int lhs, int rhs) const override {
+        return check(lhs, rhs) ? 0 : kInvalidValue;
+    }
+    // The range for a constraint op is always {0}.
+    virtual RandomVariableRange getInitRange(const RandomVariableRange&,
+                                             const RandomVariableRange&) const override {
+        return RandomVariableRange(0);
+    }
+    // The slowest algorithm:
+    // iterate through every combinations of parents and save the valid pairs.
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out,
+                      std::set<int>* childOut) const override {
+        for (auto i : *parent1In) {
+            bool valid = false;
+            for (auto j : *parent2In) {
+                if (this->check(i, j)) {
+                    parent2Out->insert(j);
+                    valid = true;
+                }
+            }
+            if (valid) parent1Out->insert(i);
+        }
+        // The child keeps its {0} range only if some pair satisfied the check.
+        if (!parent1Out->empty()) childOut->insert(0);
+    }
+};
+
+// child = parent1 + parent2. Continuous ranges are propagated with interval
+// arithmetic; otherwise falls back to the brute-force base implementation.
+class Addition : public IRandomVariableOp {
+   public:
+    virtual int eval(int lhs, int rhs) const override { return lhs + rhs; }
+    virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+                                             const RandomVariableRange& rhs) const override {
+        return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max());
+    }
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>* childIn, std::set<int>* parent1Out,
+                      std::set<int>* parent2Out, std::set<int>* childOut) const override {
+        if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
+            IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+                                    childOut);
+        } else {
+            // For parents and child with closed ranges, the out range can be computed directly
+            // without iterations.
+            std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
+            std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
+            std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+
+            // From ranges for parent, evaluate range for child.
+            // [a, b] + [c, d] -> [a + c, b + d]
+            fillRange(childOut, std::max(child.first, parent1.first + parent2.first),
+                      std::min(child.second, parent1.second + parent2.second));
+
+            // From ranges for child and one parent, evaluate range for another parent.
+            // [a, b] - [c, d] -> [a - d, b - c]
+            fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second),
+                      std::min(parent1.second, child.second - parent2.first));
+            fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second),
+                      std::min(parent2.second, child.second - parent1.first));
+        }
+    }
+    virtual const char* getName() const override { return "ADD"; }
+};
+
+// child = parent1 - parent2. Same interval-arithmetic fast path as Addition
+// when all three ranges are continuous.
+class Subtraction : public IRandomVariableOp {
+   public:
+    virtual int eval(int lhs, int rhs) const override { return lhs - rhs; }
+    virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+                                             const RandomVariableRange& rhs) const override {
+        return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min());
+    }
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>* childIn, std::set<int>* parent1Out,
+                      std::set<int>* parent2Out, std::set<int>* childOut) const override {
+        if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
+            IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+                                    childOut);
+        } else {
+            // Similar algorithm as Addition.
+            // child = [p1.lo - p2.hi, p1.hi - p2.lo]; parents are solved back
+            // from child by rearranging the same identity.
+            std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
+            std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
+            std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+            fillRange(childOut, std::max(child.first, parent1.first - parent2.second),
+                      std::min(child.second, parent1.second - parent2.first));
+            fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first),
+                      std::min(parent1.second, child.second + parent2.second));
+            fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second),
+                      std::min(parent2.second, parent1.second - child.first));
+        }
+    }
+    virtual const char* getName() const override { return "SUB"; }
+};
+
+// child = parent1 * parent2. Fast paths assume non-negative operands; any
+// negative value falls back to the brute-force base implementation.
+class Multiplication : public IRandomVariableOp {
+   public:
+    virtual int eval(int lhs, int rhs) const override { return lhs * rhs; }
+    virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+                                             const RandomVariableRange& rhs) const override {
+        if (lhs.min() < 0 || rhs.min() < 0) {
+            return IRandomVariableOp::getInitRange(lhs, rhs);
+        } else {
+            // Non-negative operands: product is monotonic, so the interval
+            // endpoints (clamped to kMaxValue) bound the result.
+            int lower = std::min(lhs.min() * rhs.min(), kMaxValue);
+            int upper = std::min(lhs.max() * rhs.max(), kMaxValue);
+            return RandomVariableRange(lower, upper);
+        }
+    }
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>* childIn, std::set<int>* parent1Out,
+                      std::set<int>* parent2Out, std::set<int>* childOut) const override {
+        if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) {
+            IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+                                    childOut);
+        } else {
+            bool isChildInContinuous = isContinuous(childIn);
+            std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+            for (auto i : *parent1In) {
+                bool valid = false;
+                for (auto j : *parent2In) {
+                    int res = this->eval(i, j);
+                    // Since MUL increases monotonically with one value, break the loop if the
+                    // result is larger than the limit.
+                    if (res > child.second) break;
+                    if (res < child.first) continue;
+                    if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+                        valid = true;
+                        parent2Out->insert(j);
+                        childOut->insert(res);
+                    }
+                }
+                if (valid) parent1Out->insert(i);
+            }
+        }
+    }
+    virtual const char* getName() const override { return "MUL"; }
+};
+
+// child = parent1 / parent2 (integer division); division by zero yields
+// kInvalidValue so the pair is rejected during range evaluation.
+class Division : public IRandomVariableOp {
+   public:
+    virtual int eval(int lhs, int rhs) const override {
+        return rhs == 0 ? kInvalidValue : lhs / rhs;
+    }
+    virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+                                             const RandomVariableRange& rhs) const override {
+        // Closed-form interval only when lhs is non-negative and rhs strictly
+        // positive; otherwise fall back to brute force.
+        if (lhs.min() < 0 || rhs.min() <= 0) {
+            return IRandomVariableOp::getInitRange(lhs, rhs);
+        } else {
+            return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min());
+        }
+    }
+    virtual const char* getName() const override { return "DIV"; }
+};
+
+// Division that is only valid when the quotient is exact: a non-zero remainder
+// (or a zero divisor) yields kInvalidValue.
+class ExactDivision : public Division {
+   public:
+    virtual int eval(int lhs, int rhs) const override {
+        return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs;
+    }
+    virtual const char* getName() const override { return "EXACT_DIV"; }
+};
+
+// child = parent1 % parent2; modulo by zero yields kInvalidValue. Has a fast
+// path for the common constraint "parent1 % parent2 == 0".
+class Modulo : public IRandomVariableOp {
+   public:
+    virtual int eval(int lhs, int rhs) const override {
+        return rhs == 0 ? kInvalidValue : lhs % rhs;
+    }
+    virtual RandomVariableRange getInitRange(const RandomVariableRange&,
+                                             const RandomVariableRange& rhs) const override {
+        return RandomVariableRange(0, rhs.max());
+    }
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>* childIn, std::set<int>* parent1Out,
+                      std::set<int>* parent2Out, std::set<int>* childOut) const override {
+        if (*childIn->begin() != 0 || childIn->size() != 1u) {
+            IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+                                    childOut);
+        } else {
+            // For the special case that child is a const 0, it would be faster if the range for
+            // parents are evaluated separately.
+
+            // Evaluate parent1 directly.
+            // Keep i if it is divisible by at least one candidate divisor.
+            for (auto i : *parent1In) {
+                for (auto j : *parent2In) {
+                    if (i % j == 0) {
+                        parent1Out->insert(i);
+                        break;
+                    }
+                }
+            }
+            // Evaluate parent2, see if a multiple of parent2 value can be found in parent1.
+            int parent1Max = *parent1In->rbegin();
+            for (auto i : *parent2In) {
+                int jMax = parent1Max / i;
+                for (int j = 1; j <= jMax; j++) {
+                    if (parent1In->find(i * j) != parent1In->end()) {
+                        parent2Out->insert(i);
+                        break;
+                    }
+                }
+            }
+            if (!parent1Out->empty()) childOut->insert(0);
+        }
+    }
+    virtual const char* getName() const override { return "MOD"; }
+};
+
+class Maximum : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); }
+ virtual const char* getName() const override { return "MAX"; }
+};
+
+class Minimum : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); }
+ virtual const char* getName() const override { return "MIN"; }
+};
+
+// Unary square: child = parent * parent. Used to rewrite x * x when both sides of
+// operator* refer to the same node (at most one edge may connect two nodes).
+class Square : public IUnaryOp {
+   public:
+    virtual int eval(int val) const override { return val * val; }
+    virtual const char* getName() const override { return "SQUARE"; }
+};
+
+// Unary identity: child = parent. Used by setEqual to make one variable track another
+// without introducing a binary constraint node.
+class UnaryEqual : public IUnaryOp {
+   public:
+    virtual int eval(int val) const override { return val; }
+    virtual const char* getName() const override { return "UNARY_EQUAL"; }
+};
+
+// Binary equality constraint. As a constraint node the child range is always the
+// singleton {0}; the fast eval narrows both parents to their common values.
+class Equal : public IConstraintOp {
+   public:
+    virtual bool check(int lhs, int rhs) const override { return lhs == rhs; }
+    virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+                      const std::set<int>* childIn, std::set<int>* parent1Out,
+                      std::set<int>* parent2Out, std::set<int>* childOut) const override {
+        NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0);
+        // The intersection of two sets can be found in O(n).
+        // std::set iteration is sorted, satisfying set_intersection's precondition.
+        std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(),
+                              parent2In->end(), std::inserter(*parent1Out, parent1Out->begin()));
+        *parent2Out = *parent1Out;
+        childOut->insert(0);
+    }
+    virtual const char* getName() const override { return "EQUAL"; }
+};
+
+// Strict ordering constraint: holds when lhs is strictly greater than rhs.
+class GreaterThan : public IConstraintOp {
+   public:
+    virtual bool check(int lhs, int rhs) const override { return rhs < lhs; }
+    virtual const char* getName() const override { return "GREATER_THAN"; }
+};
+
+// Non-strict ordering constraint: holds when lhs is at least rhs.
+class GreaterEqual : public IConstraintOp {
+   public:
+    virtual bool check(int lhs, int rhs) const override { return !(lhs < rhs); }
+    virtual const char* getName() const override { return "GREATER_EQUAL"; }
+};
+
+// Unary multiplication by a floating-point constant; the result is floored back to int.
+// Used by operator*(RandomVariable, float).
+class FloatMultiplication : public IUnaryOp {
+   public:
+    // explicit: a bare float must not silently convert into an op instance.
+    explicit FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {}
+    virtual int eval(int val) const override {
+        return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand));
+    }
+    virtual const char* getName() const override { return "MUL_FLOAT"; }
+
+   private:
+    float mMultiplicand;
+};
+
+// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes.
+// Since there must be at most one edge between two RandomVariableNodes, we have to do something
+// special when both sides are referring to the same node.
+
+// x + x is rewritten as x * 2 (a single edge with a const operand).
+RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get())
+                                  : RandomVariable(lhs, rhs, Singleton<Addition>::get());
+}
+// x - x folds to the constant 0.
+RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? RandomVariable(0)
+                                  : RandomVariable(lhs, rhs, Singleton<Subtraction>::get());
+}
+// x * x is rewritten as the unary SQUARE op.
+RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? RandomVariable(lhs, RandomVariable(), Singleton<Square>::get())
+                                  : RandomVariable(lhs, rhs, Singleton<Multiplication>::get());
+}
+// Multiplication by a float constant; each call creates its own op holding the multiplicand.
+RandomVariable operator*(const RandomVariable& lhs, const float& rhs) {
+    return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs));
+}
+// x / x folds to the constant 1.
+RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? RandomVariable(1)
+                                  : RandomVariable(lhs, rhs, Singleton<Division>::get());
+}
+// x % x folds to the constant 0.
+RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? RandomVariable(0)
+                                  : RandomVariable(lhs, rhs, Singleton<Modulo>::get());
+}
+// max(x, x) == x: reuse the node directly.
+RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get());
+}
+// min(x, x) == x: reuse the node directly.
+RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) {
+    return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get());
+}
+
+// Division that requires the quotient to be exact (no remainder); x.exactDiv(x) folds to 1.
+RandomVariable RandomVariable::exactDiv(const RandomVariable& other) {
+    return mVar == other.get() ? RandomVariable(1)
+                               : RandomVariable(*this, other, Singleton<ExactDivision>::get());
+}
+
+// Constrain this variable and `other` to be equal. Prefers rewriting one node as a
+// UNARY_EQUAL child of the other (cheaper to evaluate); falls back to a binary EQUAL
+// constraint node when neither node is subordinate to the other.
+RandomVariable RandomVariable::setEqual(const RandomVariable& other) const {
+    RandomVariableNode node1 = mVar, node2 = other.get();
+    NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index;
+
+    // Do not setEqual on the same pair twice.
+    if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) ||
+        (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) {
+        NN_FUZZER_LOG << "Already equal. Return.";
+        return RandomVariable();
+    }
+
+    // If possible, always try UnaryEqual first to reduce the search space.
+    // UnaryEqual can be used if node B is FREE and is evaluated later than node A.
+    // TODO: Reduce code duplication.
+    if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) {
+        NN_FUZZER_LOG << "  Make var" << node2->index << " a child of var" << node1->index;
+        node2->type = RandomVariableType::OP;
+        node2->parent1 = node1;
+        node2->op = Singleton<UnaryEqual>::get();
+        node1->children.push_back(node2);
+        RandomVariableNetwork::get()->join(node1, node2);
+        node1->updateTimestamp();
+        return other;
+    }
+    if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) {
+        NN_FUZZER_LOG << "  Make var" << node1->index << " a child of var" << node2->index;
+        node1->type = RandomVariableType::OP;
+        node1->parent1 = node2;
+        node1->op = Singleton<UnaryEqual>::get();
+        node2->children.push_back(node1);
+        RandomVariableNetwork::get()->join(node2, node1);
+        // NOTE(review): the branch above timestamps node1 (the parent); this branch also
+        // timestamps node1 (here the child) -- confirm the asymmetry is intentional.
+        node1->updateTimestamp();
+        return *this;
+    }
+    return RandomVariable(*this, other, Singleton<Equal>::get());
+}
+
+// Constrain this > other. A variable can never be strictly greater than itself,
+// so same-node calls are a programming error.
+RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const {
+    NN_FUZZER_CHECK(mVar != other.get());
+    return RandomVariable(*this, other, Singleton<GreaterThan>::get());
+}
+// Constrain this >= other; x >= x is trivially true, so no node is created.
+RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const {
+    return mVar == other.get() ? *this
+                               : RandomVariable(*this, other, Singleton<GreaterEqual>::get());
+}
+
+// Insert a node into the disjoint-set of subnets, merging the subnets of its parents.
+// Appending to the merged subnet's order keeps a valid topological evaluation order,
+// since both parents were added (and thus ordered) before the child.
+void DisjointNetwork::add(const RandomVariableNode& var) {
+    // Find the subnet index of the parents and decide the index for var.
+    int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1];
+    int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2];
+    int ind = join(ind1, ind2);
+    // If no parent, put it into a new subnet component.
+    if (ind == -1) ind = mNextIndex++;
+    NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind;
+    mIndexMap[var] = ind;
+    mEvalOrderMap[ind].push_back(var);
+}
+
+// Merge subnet ind2 into subnet ind1 and return the index of the surviving subnet.
+// An index of -1 denotes "no subnet" and acts as the identity for the merge.
+int DisjointNetwork::join(int ind1, int ind2) {
+    if (ind1 == -1) return ind2;
+    if (ind2 == -1) return ind1;
+    if (ind1 == ind2) return ind1;
+    NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2;
+    auto& target = mEvalOrderMap[ind1];
+    auto& source = mEvalOrderMap[ind2];
+    // Migrate every node of subnet ind2 to the end of subnet ind1, preserving order.
+    for (const auto& node : source) {
+        target.push_back(node);
+        mIndexMap[node] = ind1;
+    }
+    // Subnet ind2 no longer exists.
+    mEvalOrderMap.erase(ind2);
+    return ind1;
+}
+
+// Process-wide singleton accessor (function-local static, thread-safe init in C++11+).
+RandomVariableNetwork* RandomVariableNetwork::get() {
+    static RandomVariableNetwork instance;
+    return &instance;
+}
+
+// Reset the singleton network (and the global node counter/default) so a fresh random
+// graph can be generated from scratch.
+void RandomVariableNetwork::initialize(int defaultValue) {
+    RandomVariableBase::globalIndex = 0;
+    RandomVariable::defaultValue = defaultValue;
+    mIndexMap.clear();
+    mEvalOrderMap.clear();
+    mDimProd.clear();
+    mNextIndex = 0;
+    mGlobalTime = 0;
+    // -1 guarantees the first evalRange() sees every subnet as dirty.
+    mTimestamp = -1;
+}
+
+// Returns true if node2 may be rewritten as a UNARY_EQUAL child of node1: node2 must be
+// FREE and either live in a different subnet or be evaluated after node1 in the same one.
+bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1,
+                                          const RandomVariableNode& node2) {
+    if (node2->type != RandomVariableType::FREE) return false;
+    int ind1 = mIndexMap[node1];
+    // node2 is of a different subnet.
+    if (ind1 != mIndexMap[node2]) return true;
+    for (const auto& node : mEvalOrderMap[ind1]) {
+        if (node == node2) return false;
+        // node2 is of the same subnet but evaluated later than node1.
+        if (node == node1) return true;
+    }
+    // Both nodes belong to subnet ind1, so the scan above must hit one of them.
+    NN_FUZZER_CHECK(false) << "Code executed in non-reachable region.";
+    return false;
+}
+
+// Per-node scratch state used while evaluating a subnet, so candidate ranges can be
+// narrowed tentatively (staging) and only written back to the node on success.
+struct EvalInfo {
+    // The RandomVariableNode that this EvalInfo is associated with.
+    // var->value is the current value during evaluation.
+    RandomVariableNode var;
+
+    // The RandomVariable value is staged when a valid combination is found.
+    std::set<int> staging;
+
+    // The staging values are committed after a subnet evaluation.
+    std::set<int> committed;
+
+    // Keeps track of the latest timestamp that committed is updated.
+    int timestamp;
+
+    // For evalSubnetWithLocalNetwork.
+    RandomVariableType originalType;
+
+    // Should only invoke eval on OP RandomVariable.
+    // Returns false if the op result is invalid or outside the committed range.
+    bool eval() {
+        NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
+        var->value = var->op->eval(var->parent1->value,
+                                   var->parent2 == nullptr ? 0 : var->parent2->value);
+        if (var->value == kInvalidValue) return false;
+        return committed.find(var->value) != committed.end();
+    }
+    // Record the current value as part of a known-valid combination.
+    void stage() { staging.insert(var->value); }
+    void commit() {
+        // Only update committed and timestamp if the range is *indeed* changed.
+        // (staging is always a subset of committed, so equal sizes imply equal sets.)
+        if (staging.size() != committed.size()) {
+            committed = std::move(staging);
+            timestamp = RandomVariableNetwork::get()->getGlobalTime();
+        }
+        staging.clear();
+    }
+    // Write the committed range back to the node itself.
+    void updateRange() {
+        // Only update range and timestamp if the range is *indeed* changed.
+        if (committed.size() != var->range.size()) {
+            var->range = RandomVariableRange(committed);
+            var->timestamp = timestamp;
+        }
+        committed.clear();
+    }
+
+    // Seeds committed from the node's current range and inherits its timestamp.
+    EvalInfo(const RandomVariableNode& var)
+        : var(var),
+          committed(var->range.getChoices().begin(), var->range.getChoices().end()),
+          timestamp(var->timestamp) {}
+};
+using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>;
+
+// For logging only. Renders a node's kind, operands, committed range (truncated to 20
+// entries by joinStr), and timestamp.
+inline std::string toString(const RandomVariableNode& var, EvalContext* context) {
+    std::stringstream ss;
+    ss << "var" << var->index << " = ";
+    const auto& committed = context->at(var).committed;
+    switch (var->type) {
+        case RandomVariableType::FREE:
+            ss << "FREE ["
+               << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]";
+            break;
+        case RandomVariableType::CONST:
+            ss << "CONST " << var->value;
+            break;
+        case RandomVariableType::OP:
+            ss << "var" << var->parent1->index << " " << var->op->getName();
+            if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
+            ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end()))
+               << "]";
+            break;
+        default:
+            NN_FUZZER_CHECK(false);
+    }
+    ss << ", timestamp = " << context->at(var).timestamp;
+    return ss.str();
+}
+
+// Check if the subnet needs to be re-evaluated by comparing the timestamps.
+// When a context is supplied, the staged per-evaluation timestamps are used instead of
+// the ones stored on the nodes.
+static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime,
+                                EvalContext* context = nullptr) {
+    for (const auto& var : evalOrder) {
+        int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp;
+        // If we find a node that has been modified since last evaluation, the subnet needs to be
+        // re-evaluated.
+        if (timestamp > subnetTime) return true;
+    }
+    return false;
+}
+
+// Helper function to evaluate the subnet recursively.
+// Iterate through all combinations of FREE RandomVariables choices; every combination
+// that survives all OP checks has its values staged on every node in the order.
+static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) {
+    if (i == evalOrder.size()) {
+        // Reach the end of the evaluation, find a valid combination.
+        for (auto& var : evalOrder) context->at(var).stage();
+        return;
+    }
+    const auto& var = evalOrder[i];
+    if (var->type == RandomVariableType::FREE) {
+        // For FREE RandomVariable, iterate through all valid choices.
+        for (int val : context->at(var).committed) {
+            var->value = val;
+            evalSubnetHelper(evalOrder, context, i + 1);
+        }
+        return;
+    } else if (var->type == RandomVariableType::OP) {
+        // For OP RandomVariable, evaluate from parents and terminate if the result is invalid.
+        if (!context->at(var).eval()) return;
+    }
+    // CONST nodes (and successful OP nodes) simply fall through to the next position.
+    evalSubnetHelper(evalOrder, context, i + 1);
+}
+
+// Check if the subnet has exactly one OP RandomVariable.
+static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) {
+    bool seenOp = false;
+    for (const auto& var : evalOrder) {
+        if (var->type != RandomVariableType::OP) continue;
+        // A second OP disqualifies the subnet immediately.
+        if (seenOp) return false;
+        seenOp = true;
+    }
+    return seenOp;
+}
+
+// Evaluate with a potentially faster approach provided by IRandomVariableOp.
+// The single OP node is the last entry of the evaluation order (children always follow
+// their parents); its op narrows parent and child ranges directly into staging.
+static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder,
+                                            EvalContext* context) {
+    NN_FUZZER_LOG << "Identified as single op subnet";
+    const auto& var = evalOrder.back();
+    NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
+    var->op->eval(&context->at(var->parent1).committed,
+                  var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed,
+                  &context->at(var).committed, &context->at(var->parent1).staging,
+                  var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging,
+                  &context->at(var).staging);
+}
+
+// Count the combinations of FREE RandomVariable choices, saturating at kLimit to
+// avoid uint64 overflow. Uses the staged committed ranges when a context is supplied.
+static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder,
+                                          EvalContext* context = nullptr) {
+    constexpr uint64_t kLimit = 1e8;
+    uint64_t numCombinations = 1;
+    for (const auto& var : evalOrder) {
+        if (var->type == RandomVariableType::FREE) {
+            size_t size =
+                    context == nullptr ? var->range.size() : context->at(var).committed.size();
+            numCombinations *= size;
+            // To prevent overflow.
+            if (numCombinations > kLimit) return kLimit;
+        }
+    }
+    return numCombinations;
+}
+
+// Evaluate the subnet recursively. Will return fail if the number of combinations of FREE
+// RandomVariable exceeds the threshold kMaxNumCombinations.
+// On success, every node's staged range is committed; on failure the context is left
+// partially staged and the caller is expected to abort the whole evaluation.
+static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) {
+    constexpr uint64_t kMaxNumCombinations = 1e7;
+    NN_FUZZER_LOG << "Evaluate with brute force";
+    if (isSingleOpSubnet(evalOrder)) {
+        // If the network only have one single OP, dispatch to a faster evaluation.
+        evalSubnetSingleOpHelper(evalOrder, context);
+    } else {
+        if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) {
+            NN_FUZZER_LOG << "Terminate the evaluation because of large search range";
+            std::cout << "[          ] Terminate the evaluation because of large search range"
+                      << std::endl;
+            return false;
+        }
+        evalSubnetHelper(evalOrder, context);
+    }
+    // An empty staging set means no combination satisfied that node -> no solution.
+    for (auto& var : evalOrder) {
+        if (context->at(var).staging.empty()) {
+            NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context);
+            return false;
+        }
+        context->at(var).commit();
+    }
+    return true;
+}
+
+// A sub-partition of a subnet produced by GraphPartitioner. Bridge nodes are shared
+// with neighboring local networks and are temporarily treated as FREE during eval so
+// each side can narrow them independently.
+struct LocalNetwork {
+    EvaluationOrder evalOrder;
+    std::vector<RandomVariableNode> bridgeNodes;
+    int timestamp = 0;
+
+    bool eval(EvalContext* context) {
+        NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp;
+        // Temporarily treat bridge nodes as FREE RandomVariables.
+        for (const auto& var : bridgeNodes) {
+            context->at(var).originalType = var->type;
+            var->type = RandomVariableType::FREE;
+        }
+        // Clear leftover staging from a previous round before re-evaluating.
+        for (const auto& var : evalOrder) {
+            context->at(var).staging.clear();
+            NN_FUZZER_LOG << "  - " << toString(var, context);
+        }
+        bool success = evalSubnetWithBruteForce(evalOrder, context);
+        // Reset the RandomVariable types for bridge nodes.
+        for (const auto& var : bridgeNodes) var->type = context->at(var).originalType;
+        return success;
+    }
+};
+
+// Partition the network further into LocalNetworks based on the result from bridge annotation
+// algorithm.
+class GraphPartitioner : public DisjointNetwork {
+   public:
+    GraphPartitioner() = default;
+
+    // Entry point: annotate bridges, re-partition treating bridges as cut edges, and
+    // return the resulting local networks stamped with `timestamp`.
+    std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) {
+        annotateBridge(evalOrder);
+        for (const auto& var : evalOrder) add(var);
+        return get(timestamp);
+    }
+
+   private:
+    GraphPartitioner(const GraphPartitioner&) = delete;
+    GraphPartitioner& operator=(const GraphPartitioner&) = delete;
+
+    // Find the parent-child relationship between var1 and var2, and reset the bridge.
+    // If var2 is not a parent of var1, recurse with the roles swapped.
+    void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) {
+        if (var1->parent1 == var2) {
+            mBridgeInfo[var1].isParent1Bridge = true;
+        } else if (var1->parent2 == var2) {
+            mBridgeInfo[var1].isParent2Bridge = true;
+        } else {
+            setBridgeFlag(var2, var1);
+        }
+    }
+
+    // Annotate the bridges with DFS -- an edge [u, v] is a bridge if none of u's ancestors is
+    // reachable from a node in the subtree of v. The complexity is O(V + E).
+    // discoveryTime: The timestamp a node is visited
+    // lowTime: The min discovery time of all reachable nodes from the subtree of the node.
+    void annotateBridgeHelper(const RandomVariableNode& var, int* time) {
+        mBridgeInfo[var].visited = true;
+        mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++;
+
+        // The algorithm operates on undirected graph. First find all adjacent nodes.
+        auto adj = var->children;
+        if (var->parent1 != nullptr) adj.push_back(var->parent1);
+        if (var->parent2 != nullptr) adj.push_back(var->parent2);
+
+        for (const auto& weakChild : adj) {
+            auto child = weakChild.lock();
+            NN_FUZZER_CHECK(child != nullptr);
+            // Ignore nodes outside the subnet being partitioned.
+            if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue;
+            if (!mBridgeInfo[child].visited) {
+                mBridgeInfo[child].parent = var;
+                annotateBridgeHelper(child, time);
+
+                // If none of nodes in the subtree of child is connected to any ancestors of var,
+                // then it is a bridge.
+                mBridgeInfo[var].lowTime =
+                        std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime);
+                if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime)
+                    setBridgeFlag(var, child);
+            } else if (mBridgeInfo[var].parent != child) {
+                // Back edge: update lowTime, but never through the DFS-tree parent edge.
+                mBridgeInfo[var].lowTime =
+                        std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime);
+            }
+        }
+    }
+
+    // Find all bridges in the subnet with DFS.
+    void annotateBridge(const EvaluationOrder& evalOrder) {
+        // Pre-create an entry per node so membership checks above can use find().
+        for (const auto& var : evalOrder) mBridgeInfo[var];
+        int time = 0;
+        for (const auto& var : evalOrder) {
+            if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time);
+        }
+    }
+
+    // Re-partition the network by treating bridges as no edge.
+    // Parent pointers are restored after the base-class add.
+    void add(const RandomVariableNode& var) {
+        auto parent1 = var->parent1;
+        auto parent2 = var->parent2;
+        if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr;
+        if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr;
+        DisjointNetwork::add(var);
+        var->parent1 = parent1;
+        var->parent2 = parent2;
+    }
+
+    // Add bridge nodes to the local network and remove single node subnet.
+    std::vector<LocalNetwork> get(int timestamp) {
+        std::vector<LocalNetwork> res;
+        for (auto& pair : mEvalOrderMap) {
+            // We do not need to evaluate subnet with only a single node.
+            if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue;
+            res.emplace_back();
+            for (const auto& var : pair.second) {
+                // Severed bridge parents rejoin here as shared (bridge) nodes.
+                if (mBridgeInfo[var].isParent1Bridge) {
+                    res.back().evalOrder.push_back(var->parent1);
+                    res.back().bridgeNodes.push_back(var->parent1);
+                }
+                if (mBridgeInfo[var].isParent2Bridge) {
+                    res.back().evalOrder.push_back(var->parent2);
+                    res.back().bridgeNodes.push_back(var->parent2);
+                }
+                res.back().evalOrder.push_back(var);
+            }
+            res.back().timestamp = timestamp;
+        }
+        return res;
+    }
+
+    // For bridge discovery algorithm.
+    struct BridgeInfo {
+        bool isParent1Bridge = false;
+        bool isParent2Bridge = false;
+        int discoveryTime = 0;
+        int lowTime = 0;
+        bool visited = false;
+        std::shared_ptr<RandomVariableBase> parent = nullptr;
+    };
+    std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo;
+};
+
+// Evaluate subnets repeatedly until converge.
+// Class T_Subnet must have member evalOrder, timestamp, and member function eval.
+// Each pass re-evaluates only stale subnets; convergence is reached when a full pass
+// finds nothing stale. Returns false as soon as any subnet evaluation fails.
+template <class T_Subnet>
+inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) {
+    bool terminate = false;
+    while (!terminate) {
+        terminate = true;
+        for (auto& subnet : *subnets) {
+            if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) {
+                if (!subnet.eval(context)) return false;
+                subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime();
+                terminate = false;
+            }
+        }
+    }
+    return true;
+}
+
+// Evaluate the subnet by first partitioning it further into LocalNetworks, then
+// iterating them to convergence.
+static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp,
+                                       EvalContext* context) {
+    NN_FUZZER_LOG << "Evaluate with local network";
+    auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp);
+    return evalSubnetsRepeatedly(&localNetworks, context);
+}
+
+// The transitive ancestor closure of a single leaf node (a node with no children),
+// in a valid evaluation order. Evaluated via the local-network strategy.
+struct LeafNetwork {
+    EvaluationOrder evalOrder;
+    int timestamp = 0;
+    LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) {
+        std::set<RandomVariableNode> visited;
+        constructorHelper(var, &visited);
+    }
+    // Construct the leaf network by recursively including parent nodes.
+    // Post-order insertion guarantees parents precede children in evalOrder.
+    void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) {
+        if (var == nullptr || visited->find(var) != visited->end()) return;
+        constructorHelper(var->parent1, visited);
+        constructorHelper(var->parent2, visited);
+        visited->insert(var);
+        evalOrder.push_back(var);
+    }
+    bool eval(EvalContext* context) {
+        return evalSubnetWithLocalNetwork(evalOrder, timestamp, context);
+    }
+};
+
+// Evaluate the subnet by leaf network.
+// NOTE: This algorithm will only produce correct result for *most* of the time (> 99%).
+// The random graph generator is expected to retry if it fails.
+static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp,
+                                      EvalContext* context) {
+    NN_FUZZER_LOG << "Evaluate with leaf network";
+    // Construct leaf networks, one per node without children.
+    std::vector<LeafNetwork> leafNetworks;
+    for (const auto& var : evalOrder) {
+        if (var->children.empty()) {
+            NN_FUZZER_LOG << "Found leaf " << toString(var, context);
+            leafNetworks.emplace_back(var, timestamp);
+        }
+    }
+    return evalSubnetsRepeatedly(&leafNetworks, context);
+}
+
+// Register a tensor's dimension variables so their product can later be capped by
+// enforceDimProd. A rank-0/1 tensor needs no product constraint.
+void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) {
+    if (dims.size() <= 1) return;
+    EvaluationOrder order;
+    for (const auto& dim : dims) order.push_back(dim.get());
+    mDimProd.push_back(order);
+}
+
+// Cap each registered dimension product at kMaxValue by trimming every dimension's
+// committed range. Subnets whose ranges were trimmed are added to dirtySubnets for
+// re-evaluation. Returns false if any dimension's range becomes empty (no solution).
+bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd,
+                    const std::unordered_map<RandomVariableNode, int>& indexMap,
+                    EvalContext* context, std::set<int>* dirtySubnets) {
+    for (auto& evalOrder : mDimProd) {
+        NN_FUZZER_LOG << "  Dimension product network size = " << evalOrder.size();
+        // Initialize EvalInfo of each RandomVariable.
+        for (auto& var : evalOrder) {
+            if (context->find(var) == context->end()) context->emplace(var, var);
+            NN_FUZZER_LOG << "  - " << toString(var, context);
+        }
+
+        // Enforce the product of the dimension values below kMaxValue:
+        //   max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...)
+        // prod is the product of every dimension's minimum (smallest committed value).
+        int prod = 1;
+        for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin());
+        for (auto& var : evalOrder) {
+            auto& committed = context->at(var).committed;
+            // Dividing out var's own minimum leaves the product of the other dims' minima.
+            int maxValue = kMaxValue / (prod / *committed.begin());
+            auto it = committed.upper_bound(maxValue);
+            // var has empty range -> no solution.
+            if (it == committed.begin()) return false;
+            // The range is not modified -> continue.
+            if (it == committed.end()) continue;
+            // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation.
+            committed.erase(it, committed.end());
+            context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime();
+            dirtySubnets->insert(indexMap.at(var));
+        }
+    }
+    return true;
+}
+
+// Narrow every variable's range until all subnets and dimension-product constraints
+// are mutually consistent. Chooses brute-force, local-network, or leaf-network
+// evaluation per subnet based on the size of the search space. Returns false when
+// any constraint is unsatisfiable; only on success are the node ranges updated.
+bool RandomVariableNetwork::evalRange() {
+    constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500;
+    constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5;
+    NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks";
+    EvalContext context;
+    std::set<int> dirtySubnets;  // Which subnets needs evaluation.
+    for (auto& pair : mEvalOrderMap) {
+        const auto& evalOrder = pair.second;
+        // Decide whether needs evaluation by timestamp -- if no range has changed after the last
+        // evaluation, then the subnet does not need re-evaluation.
+        if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue;
+        dirtySubnets.insert(pair.first);
+    }
+    if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
+
+    // Repeat until the ranges converge.
+    while (!dirtySubnets.empty()) {
+        for (int ind : dirtySubnets) {
+            const auto& evalOrder = mEvalOrderMap[ind];
+            NN_FUZZER_LOG << "  Sub-network #" << ind << " size = " << evalOrder.size();
+
+            // Initialize EvalInfo of each RandomVariable.
+            for (auto& var : evalOrder) {
+                if (context.find(var) == context.end()) context.emplace(var, var);
+                NN_FUZZER_LOG << "  - " << toString(var, &context);
+            }
+
+            // Dispatch to different algorithm according to search range.
+            bool success;
+            uint64_t numCombinations = getNumCombinations(evalOrder);
+            if (numCombinations <= kMaxNumCombinationsWithBruteForce) {
+                success = evalSubnetWithBruteForce(evalOrder, &context);
+            } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) {
+                success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context);
+            } else {
+                success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context);
+            }
+            if (!success) return false;
+        }
+        dirtySubnets.clear();
+        // Trimming dimension products may dirty subnets again, hence the outer loop.
+        if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
+    }
+    // A successful evaluation, update RandomVariables from EvalContext.
+    for (auto& pair : context) pair.second.updateRange();
+    mTimestamp = getGlobalTime();
+    NN_FUZZER_LOG << "Finish range evaluation";
+    return true;
+}
+
+// Undo a constraint node created by setEqual: detach it from its parent(s)' child
+// lists. A binary Equal node is fully unlinked; a UnaryEqual node reverts to FREE.
+static void unsetEqual(const RandomVariableNode& node) {
+    if (node == nullptr) return;
+    NN_FUZZER_LOG << "Unset equality of var" << node->index;
+    auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) {
+        return ptr.lock() == node;
+    };
+    RandomVariableNode parent1 = node->parent1, parent2 = node->parent2;
+    parent1->children.erase(
+            std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual));
+    node->parent1 = nullptr;
+    if (parent2 != nullptr) {
+        // For Equal.
+        parent2->children.erase(
+                std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual));
+        node->parent2 = nullptr;
+    } else {
+        // For UnaryEqual.
+        node->type = RandomVariableType::FREE;
+        node->op = nullptr;
+    }
+}
+
+// A class to revert all the changes made to RandomVariableNetwork since the Reverter object is
+// constructed. Only used when setEqualIfCompatible results in incompatible.
+class RandomVariableNetwork::Reverter {
+   public:
+    // Take a snapshot of RandomVariableNetwork when Reverter is constructed.
+    Reverter() : mSnapshot(*RandomVariableNetwork::get()) {}
+    // Add constraint (Equal) nodes to the reverter.
+    void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); }
+    // Unlink the recorded constraint nodes, then restore the snapshot wholesale.
+    // Note: revert() is explicit; nothing is reverted if it is never called.
+    void revert() {
+        NN_FUZZER_LOG << "Revert RandomVariableNetwork";
+        // Release the constraints.
+        for (const auto& node : mEqualNodes) unsetEqual(node);
+        // Reset all member variables.
+        *RandomVariableNetwork::get() = std::move(mSnapshot);
+    }
+
+   private:
+    Reverter(const Reverter&) = delete;
+    Reverter& operator=(const Reverter&) = delete;
+    RandomVariableNetwork mSnapshot;
+    std::vector<RandomVariableNode> mEqualNodes;
+};
+
+// Constrain lhs[i] == rhs[i] element-wise iff a consistent assignment still exists;
+// otherwise all constraints added here are rolled back and false is returned.
+bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs,
+                                                 const std::vector<RandomVariable>& rhs) {
+    NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {"
+                  << joinStr(", ", rhs) << "}";
+    if (lhs.size() != rhs.size()) return false;
+    Reverter reverter;
+    bool result = true;
+    for (size_t i = 0; i < lhs.size(); i++) {
+        auto node = lhs[i].setEqual(rhs[i]).get();
+        reverter.addNode(node);
+        // Early terminate if there is no common choice between two ranges.
+        if (node != nullptr && node->range.empty()) result = false;
+    }
+    // Even if all pairs overlap locally, the global network may still be infeasible.
+    result = result && evalRange();
+    if (!result) reverter.revert();
+    NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]");
+    return result;
+}
+
+// Pin every FREE variable to a single concrete value, re-evaluating the network after
+// each choice so later picks stay consistent. Returns false if any re-evaluation fails.
+bool RandomVariableNetwork::freeze() {
+    NN_FUZZER_LOG << "Freeze the random network";
+    if (!evalRange()) return false;
+
+    std::vector<RandomVariableNode> nodes;
+    for (const auto& pair : mEvalOrderMap) {
+        // Find all FREE RandomVariables in the subnet.
+        for (const auto& var : pair.second) {
+            if (var->type == RandomVariableType::FREE) nodes.push_back(var);
+        }
+    }
+
+    // Randomly shuffle the order, this is for a more uniform randomness.
+    randomShuffle(&nodes);
+
+    // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable.
+    // TODO: Might be able to optimize this.
+    for (const auto& var : nodes) {
+        // An earlier evalRange() pass may have converted this node away from FREE.
+        if (var->type != RandomVariableType::FREE) continue;
+        size_t size = var->range.size();
+        NN_FUZZER_LOG << "Freeze " << var;
+        var->freeze();
+        NN_FUZZER_LOG << "  " << var;
+        // There is no need to re-evaluate if the FREE RandomVariable have only one choice.
+        if (size > 1) {
+            var->updateTimestamp();
+            if (!evalRange()) {
+                NN_FUZZER_LOG << "Freeze failed at " << var;
+                return false;
+            }
+        }
+    }
+    NN_FUZZER_LOG << "Finish freezing the random network";
+    return true;
+}
+
+} // namespace fuzzing_test
+} // namespace nn
+} // namespace android
diff --git a/runtime/test/fuzzing/TestRandomGraph.cpp b/runtime/test/fuzzing/TestRandomGraph.cpp
index 2047cbe..6e71652 100644
--- a/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -41,7 +41,6 @@
#include "SampleDriverFull.h"
using android::nn::sample_driver::SampleDriverFull;
-using namespace android::nn::hal;
#endif
@@ -66,27 +65,27 @@
TestDriverV1_1()
: mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {}
static constexpr char name[] = "TestDriverV1_1";
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel(model, actualCallback);
@@ -102,19 +101,19 @@
TestDriverV1_0()
: mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {}
static constexpr char name[] = "TestDriverV1_0";
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
private:
const sp<V1_2::IDevice> mDriverV1_2;
diff --git a/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
index 8fa9332..53b5aad 100644
--- a/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
+++ b/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -310,7 +310,7 @@
op->zeroPoint = 0;
break;
default:
- NN_FUZZER_CHECK(false) << "Data type " << toString(dataType)
+ NN_FUZZER_CHECK(false) << "Data type " << dataType
<< " is not supported in defaultScalarOperandConstructor.";
}
}