Replace HIDL memory types with canonical Memory
Bug: 169672209
Test: NNT_static
Change-Id: Ic3d22cd283c96f0864349342b1327ee8fe29f697
Merged-In: Ic3d22cd283c96f0864349342b1327ee8fe29f697
(cherry picked from commit bb6b0a408afb14f19120bbf9977918d9d485b192)
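The pattern this change adopts throughout is that the canonical APIs (map(), createSharedMemory(), createSharedMemoryFromFd(), IBuffer::copyTo()) report failure through a Result-like return value carrying an error message, instead of HIDL status codes and null checks. A minimal standalone sketch of that shape; GeneralResult/GeneralError below are simplified stand-ins for the real types in nnapi/Result.h, and map() is a hypothetical placeholder:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <variant>

    // Simplified stand-ins; the real types live in nnapi/Result.h.
    struct GeneralError {
        std::string message;
    };

    template <typename T>
    class GeneralResult {
      public:
        GeneralResult(T value) : mState(std::move(value)) {}
        GeneralResult(GeneralError error) : mState(std::move(error)) {}
        bool has_value() const { return std::holds_alternative<T>(mState); }
        const GeneralError& error() const { return std::get<GeneralError>(mState); }
        T& value() & { return std::get<T>(mState); }
        T&& value() && { return std::get<T>(std::move(mState)); }

      private:
        std::variant<T, GeneralError> mState;
    };

    // Hypothetical stand-in for nn::map(const Memory&).
    GeneralResult<int> map(bool succeed) {
        if (!succeed) return GeneralError{"mmap failed"};
        return 42;
    }

    int main() {
        auto mapping = map(false);
        if (!mapping.has_value()) {
            // Same check-and-log shape as RunTimePoolInfo::createFromMemory below.
            std::cerr << "Can't map shared memory: " << mapping.error().message << "\n";
            return 1;
        }
        std::cout << "mapped: " << std::move(mapping).value() << "\n";
        return 0;
    }

The call sites in the diff all follow the same check-and-log shape: test has_value(), log error().message on failure, and move the value out otherwise.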
diff --git a/common/BufferTracker.cpp b/common/BufferTracker.cpp
index cb2a326..9770ffb 100644
--- a/common/BufferTracker.cpp
+++ b/common/BufferTracker.cpp
@@ -33,7 +33,7 @@
namespace android::nn {
std::shared_ptr<ManagedBuffer> ManagedBuffer::create(uint32_t size,
- std::set<PreparedModelRole> roles,
+ std::set<HalPreparedModelRole> roles,
const Operand& operand) {
std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]);
if (buffer == nullptr) {
@@ -47,7 +47,7 @@
}
ManagedBuffer::ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
- std::set<PreparedModelRole> roles, const Operand& operand)
+ std::set<HalPreparedModelRole> roles, const Operand& operand)
: kBuffer(std::move(buffer)),
kSize(size),
kRoles(std::move(roles)),
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 4ca9709..03d3164 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -18,13 +18,13 @@
#include "CpuExecutor.h"
-#include <android/hardware_buffer.h>
#include <android-base/scopeguard.h>
-
+#include <android/hardware_buffer.h>
#include <sys/mman.h>
#include <vndk/hardware_buffer.h>
#include <Eigen/Core>
+#include <limits>
#include <memory>
#include <utility>
#include <vector>
@@ -34,19 +34,18 @@
#include <omp.h>
#endif // NNAPI_OPENMP
+#include <nnapi/SharedMemory.h>
+#include <nnapi/TypeUtils.h>
+
#include "ControlFlow.h"
#include "NeuralNetworks.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "OperationsUtils.h"
#include "Tracing.h"
-#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-
-using ::android::hidl::memory::V1_0::IMemory;
-
namespace {
class OperationExecutionContext : public IOperationExecutionContext {
@@ -273,159 +272,60 @@
// when the RunTimePoolInfo is destroyed or is assigned to.
class RunTimePoolInfo::RunTimePoolInfoImpl {
public:
- RunTimePoolInfoImpl(const hardware::hidl_memory& hidlMemory, uint8_t* buffer,
- const sp<IMemory>& memory, AHardwareBuffer* hardwareBuffer, uint32_t size);
+ RunTimePoolInfoImpl(Memory memory, Mapping mapping);
- // rule of five...
- ~RunTimePoolInfoImpl();
- RunTimePoolInfoImpl(const RunTimePoolInfoImpl&) = delete;
- RunTimePoolInfoImpl(RunTimePoolInfoImpl&&) noexcept = delete;
- RunTimePoolInfoImpl& operator=(const RunTimePoolInfoImpl&) = delete;
- RunTimePoolInfoImpl& operator=(RunTimePoolInfoImpl&&) noexcept = delete;
-
- uint8_t* getBuffer() const { return mBuffer; }
- uint32_t getSize() const { return mSize; }
+ uint8_t* getBuffer() const;
+ uint32_t getSize() const;
bool flush() const;
- const hardware::hidl_memory& getHidlMemory() const { return mHidlMemory; }
+ const Memory& getMemory() const { return mMemory; }
private:
- const hardware::hidl_memory mHidlMemory; // always used
- uint8_t* const mBuffer = nullptr; // always used
- const sp<IMemory> mMemory; // only used when hidlMemory.name() == "ashmem"
- AHardwareBuffer*
- mAHardwareBuffer; // only used when hidlMemory.name() == "hardware_buffer_blob"
- const uint32_t mSize;
+ const Memory mMemory;
+ const Mapping mMapping;
};
-RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(const hardware::hidl_memory& hidlMemory,
- uint8_t* buffer,
- const sp<IMemory>& memory,
- AHardwareBuffer* hardwareBuffer,
- uint32_t size)
- : mHidlMemory(hidlMemory),
- mBuffer(buffer),
- mMemory(memory),
- mAHardwareBuffer(hardwareBuffer),
- mSize(size) {}
+RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(Memory memory, Mapping mapping)
+ : mMemory(std::move(memory)), mMapping(std::move(mapping)) {}
-RunTimePoolInfo::RunTimePoolInfoImpl::~RunTimePoolInfoImpl() {
- if (mBuffer == nullptr) {
- return;
- }
+uint8_t* RunTimePoolInfo::RunTimePoolInfoImpl::getBuffer() const {
+ return std::visit(
+ [](auto* pointer) {
+ // Writing to a const buffer may lead to undefined behavior.
+ // TODO: Refactor the code to avoid the const_cast.
+ return static_cast<uint8_t*>(const_cast<void*>(pointer));
+ },
+ mMapping.pointer);
+}
- const auto& memType = mHidlMemory.name();
- if (memType == "ashmem") {
- // nothing to do
- } else if (memType == "mmap_fd") {
- const size_t size = mHidlMemory.size();
- if (munmap(mBuffer, size)) {
- LOG(ERROR) << "RunTimePoolInfoImpl::~RunTimePoolInfo(): Can't munmap";
- }
- } else if (memType == "hardware_buffer_blob") {
- AHardwareBuffer_unlock(mAHardwareBuffer, nullptr);
- } else if (memType == "") {
- // Represents a POINTER argument; nothing to do
- } else {
- LOG(ERROR) << "RunTimePoolInfoImpl::~RunTimePoolInfoImpl(): unsupported hidl_memory type";
- }
-
- if (mAHardwareBuffer != nullptr) {
- AHardwareBuffer_release(mAHardwareBuffer);
- }
+uint32_t RunTimePoolInfo::RunTimePoolInfoImpl::getSize() const {
+ CHECK_LE(mMapping.size, std::numeric_limits<uint32_t>::max());
+ return static_cast<uint32_t>(mMapping.size);
}
// Making sure the output data are correctly updated after execution.
bool RunTimePoolInfo::RunTimePoolInfoImpl::flush() const {
- const auto& memType = mHidlMemory.name();
- if (memType == "mmap_fd") {
- const int prot = mHidlMemory.handle()->data[1];
- if (prot & PROT_WRITE) {
- const size_t size = mHidlMemory.size();
- return msync(mBuffer, size, MS_SYNC) == 0;
- }
- }
- // No-op for other types of memory.
- return true;
+ return nn::flush(mMapping);
}
// TODO: short term, make share memory mapping and updating a utility function.
// TODO: long term, implement mmap_fd as a hidl IMemory service.
-std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromMemory(const Memory& canonicalMemory) {
- hardware::hidl_memory hidlMemory = convertToV1_0(canonicalMemory);
- uint8_t* buffer = nullptr;
- sp<IMemory> memory;
- AHardwareBuffer* hardwareBuffer = nullptr;
-
- const auto& memType = hidlMemory.name();
- if (memType == "ashmem") {
- memory = mapMemory(hidlMemory);
- if (memory == nullptr) {
- LOG(ERROR) << "Can't map shared memory.";
- return std::nullopt;
- }
- buffer = static_cast<uint8_t*>(static_cast<void*>(memory->getPointer()));
- if (buffer == nullptr) {
- LOG(ERROR) << "Can't access shared memory.";
- return std::nullopt;
- }
- } else if (memType == "mmap_fd") {
- size_t size = hidlMemory.size();
- int fd = hidlMemory.handle()->data[0];
- int prot = hidlMemory.handle()->data[1];
- size_t offset = getSizeFromInts(hidlMemory.handle()->data[2], hidlMemory.handle()->data[3]);
- buffer = static_cast<uint8_t*>(mmap(nullptr, size, prot, MAP_SHARED, fd, offset));
- if (buffer == MAP_FAILED) {
- LOG(ERROR) << "RunTimePoolInfo::set(): Can't mmap the file descriptor.";
- return std::nullopt;
- }
- } else if (memType == "hardware_buffer_blob") {
- auto handle = hidlMemory.handle();
- auto format = AHARDWAREBUFFER_FORMAT_BLOB;
- auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
- const uint32_t width = hidlMemory.size();
- const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer.
- const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer.
- const uint32_t stride = hidlMemory.size();
-
- AHardwareBuffer_Desc desc{
- .width = width,
- .height = height,
- .layers = layers,
- .format = format,
- .usage = usage,
- .stride = stride,
- };
- status_t status = AHardwareBuffer_createFromHandle(
- &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer);
- if (status != NO_ERROR) {
- LOG(ERROR) << "RunTimePoolInfo Can't create AHardwareBuffer from handle. Error: "
- << status;
- return std::nullopt;
- }
- void* gBuffer = nullptr;
- status = AHardwareBuffer_lock(hardwareBuffer, usage, -1, nullptr, &gBuffer);
- if (status != NO_ERROR) {
- AHardwareBuffer_release(hardwareBuffer);
- LOG(ERROR) << "RunTimePoolInfo Can't lock the AHardwareBuffer. Error: " << status;
- return std::nullopt;
- }
- buffer = static_cast<uint8_t*>(gBuffer);
- } else {
- LOG(ERROR) << "RunTimePoolInfo::set(): unsupported hidl_memory type";
+std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromMemory(const Memory& memory) {
+ auto mapping = map(memory);
+ if (!mapping.has_value()) {
+ LOG(ERROR) << "Can't map shared memory: " << mapping.error().message;
return std::nullopt;
}
-
- const auto impl = std::make_shared<const RunTimePoolInfoImpl>(
- hidlMemory, buffer, memory, hardwareBuffer, hidlMemory.size());
- return {RunTimePoolInfo(impl)};
+ const auto impl =
+ std::make_shared<const RunTimePoolInfoImpl>(memory, std::move(mapping).value());
+ return RunTimePoolInfo(impl);
}
RunTimePoolInfo RunTimePoolInfo::createFromExistingBuffer(uint8_t* buffer, uint32_t size) {
- const auto impl = std::make_shared<const RunTimePoolInfoImpl>(hardware::hidl_memory{}, buffer,
- nullptr, nullptr, size);
- return {impl};
+ auto mapping = Mapping{.pointer = buffer, .size = size};
+ const auto impl = std::make_shared<const RunTimePoolInfoImpl>(Memory{}, std::move(mapping));
+ return RunTimePoolInfo(impl);
}
RunTimePoolInfo::RunTimePoolInfo(const std::shared_ptr<const RunTimePoolInfoImpl>& impl)
@@ -443,8 +343,8 @@
return mImpl->flush();
}
-Memory RunTimePoolInfo::getMemory() const {
- return uncheckedConvert(mImpl->getHidlMemory());
+const Memory& RunTimePoolInfo::getMemory() const {
+ return mImpl->getMemory();
}
bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,
@@ -1813,22 +1713,21 @@
// Ensure objects are freed
auto cleanupGuard = base::make_scope_guard(
- [&tmp1, &tmp2, &condOperands, &bodyOperands, &operation, &operands] {
- auto freeLoopOutputs = [](const std::vector<uint8_t*>& tmp) {
- for (auto buffer : tmp) {
- if (buffer != nullptr) {
- delete[] buffer;
+ [&tmp1, &tmp2, &condOperands, &bodyOperands, &operation, &operands] {
+ auto freeLoopOutputs = [](const std::vector<uint8_t*>& tmp) {
+ for (auto buffer : tmp) {
+ if (buffer != nullptr) {
+ delete[] buffer;
+ }
}
- }
- };
+ };
- freeLoopOutputs(tmp1);
- freeLoopOutputs(tmp2);
- freeUnusedSubgraphOperands(&condOperands);
- freeUnusedSubgraphOperands(&bodyOperands);
- consumeOperationInputs(operation.inputs, operands);
- }
- );
+ freeLoopOutputs(tmp1);
+ freeLoopOutputs(tmp2);
+ freeUnusedSubgraphOperands(&condOperands);
+ freeUnusedSubgraphOperands(&bodyOperands);
+ consumeOperationInputs(operation.inputs, operands);
+ });
// For body outputs with unknown shape, we skip double buffering and
// allocate on each iteration instead. This allows growing output tensors
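For reference, a standalone sketch of the std::visit pattern the new RunTimePoolInfoImpl::getBuffer() uses. Mapping::pointer is a variant over const and non-const pointers; the struct below is a simplified stand-in for the canonical Mapping in nnapi/Types.h, and the visitor collapses both alternatives into a single uint8_t*:

    #include <cstddef>
    #include <cstdint>
    #include <variant>

    // Simplified stand-in for nn::Mapping from nnapi/Types.h.
    struct Mapping {
        std::variant<const void*, void*> pointer;
        size_t size;
    };

    uint8_t* getBuffer(const Mapping& mapping) {
        return std::visit(
                [](auto* pointer) {
                    // As the TODO in the patch notes, writing through a pointer
                    // obtained from a read-only mapping is undefined behavior;
                    // the const_cast only unifies both variant alternatives
                    // into one return type.
                    return static_cast<uint8_t*>(const_cast<void*>(pointer));
                },
                mapping.pointer);
    }

    int main() {
        uint8_t buffer[16] = {};
        const Mapping writable{.pointer = static_cast<void*>(buffer), .size = sizeof(buffer)};
        getBuffer(writable)[0] = 1;  // safe: this mapping is writable
        return buffer[0] == 1 ? 0 : 1;
    }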
diff --git a/common/SharedMemoryAndroid.cpp b/common/SharedMemoryAndroid.cpp
index 18881e0..3e7fa24 100644
--- a/common/SharedMemoryAndroid.cpp
+++ b/common/SharedMemoryAndroid.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#define LOG_TAG "SharedMemoryAndroid"
+
#include <android-base/logging.h>
#include <android-base/mapped_file.h>
#include <android-base/scopeguard.h>
@@ -177,6 +179,10 @@
return Mapping{.pointer = data, .size = size, .context = std::move(context)};
}
+static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
+ return (value + multiple - 1) / multiple * multiple;
+}
+
GeneralResult<Mapping> mapAhwbBlobMemory(const Memory& memory) {
const SharedHandle& handle = memory.handle;
const auto size = memory.size;
@@ -185,21 +191,30 @@
const uint32_t width = size;
const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer.
const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer.
- const uint32_t stride = size;
- AHardwareBuffer_Desc desc{
- .width = width,
- .height = height,
- .layers = layers,
- .format = format,
- .usage = usage,
- .stride = stride,
- };
-
+ // AHardwareBuffer_createFromHandle() might fail because an allocator
+ // expects a specific stride value. In that case, we try to guess it by
+ // aligning the width to small powers of 2.
+ // TODO(b/174120849): Avoid stride assumptions.
AHardwareBuffer* hardwareBuffer = nullptr;
- status_t status = AHardwareBuffer_createFromHandle(
- &desc, NN_TRY(hidlHandleFromSharedHandle(handle)),
- AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer);
+ status_t status = UNKNOWN_ERROR;
+ for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+ const uint32_t stride = roundUpToMultiple(width, alignment);
+ AHardwareBuffer_Desc desc{
+ .width = width,
+ .height = height,
+ .layers = layers,
+ .format = format,
+ .usage = usage,
+ .stride = stride,
+ };
+ status = AHardwareBuffer_createFromHandle(&desc, NN_TRY(hidlHandleFromSharedHandle(handle)),
+ AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+ &hardwareBuffer);
+ if (status == NO_ERROR) {
+ break;
+ }
+ }
if (status != NO_ERROR) {
return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
<< "Can't create AHardwareBuffer from handle. Error: " << status;
diff --git a/common/SharedMemoryHost.cpp b/common/SharedMemoryHost.cpp
index eeb4907..4c73acb 100644
--- a/common/SharedMemoryHost.cpp
+++ b/common/SharedMemoryHost.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#define LOG_TAG "SharedMemoryHost"
+
#include <android-base/logging.h>
#include <android-base/mapped_file.h>
#include <cutils/ashmem.h>
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp
index c4e5f96..b88c5da 100644
--- a/common/ValidateHal.cpp
+++ b/common/ValidateHal.cpp
@@ -871,12 +871,12 @@
const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
- std::set<PreparedModelRole>* preparedModelRoles,
+ std::set<HalPreparedModelRole>* preparedModelRoles,
V1_3::Operand* combinedOperand) {
NN_RET_CHECK(preparedModels.size() != 0);
NN_RET_CHECK(inputRoles.size() != 0 || outputRoles.size() != 0);
- std::set<PreparedModelRole> roles;
+ std::set<HalPreparedModelRole> roles;
std::vector<V1_3::Operand> operands;
operands.reserve(inputRoles.size() + outputRoles.size());
for (const auto& role : inputRoles) {
diff --git a/common/include/BufferTracker.h b/common/include/BufferTracker.h
index 60432ca..8cde283 100644
--- a/common/include/BufferTracker.h
+++ b/common/include/BufferTracker.h
@@ -36,12 +36,13 @@
// This class manages a CPU buffer allocated on heap and provides validation methods.
class ManagedBuffer {
public:
- static std::shared_ptr<ManagedBuffer> create(uint32_t size, std::set<PreparedModelRole> roles,
+ static std::shared_ptr<ManagedBuffer> create(uint32_t size,
+ std::set<HalPreparedModelRole> roles,
const Operand& operand);
// Prefer ManagedBuffer::create.
ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
- std::set<PreparedModelRole> roles, const Operand& operand);
+ std::set<HalPreparedModelRole> roles, const Operand& operand);
RunTimePoolInfo createRunTimePoolInfo() const {
return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize);
@@ -51,7 +52,7 @@
ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
const V1_3::IPreparedModel* preparedModel) const;
- // "size" is the byte size of the hidl_memory provided to the copyFrom or copyTo method.
+ // "size" is the byte size of the Memory provided to the copyFrom or copyTo method.
ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
ErrorStatus validateCopyTo(uint32_t size) const;
@@ -62,7 +63,7 @@
mutable std::mutex mMutex;
const std::unique_ptr<uint8_t[]> kBuffer;
const uint32_t kSize;
- const std::set<PreparedModelRole> kRoles;
+ const std::set<HalPreparedModelRole> kRoles;
const OperandType kOperandType;
const std::vector<uint32_t> kInitialDimensions;
std::vector<uint32_t> mUpdatedDimensions;
diff --git a/common/include/CpuExecutor.h b/common/include/CpuExecutor.h
index 0945729..3609469 100644
--- a/common/include/CpuExecutor.h
+++ b/common/include/CpuExecutor.h
@@ -18,6 +18,7 @@
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_CPU_EXECUTOR_H
#include <android-base/macros.h>
+#include <nnapi/Types.h>
#include <algorithm>
#include <memory>
@@ -28,7 +29,6 @@
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Utils.h"
-#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -110,8 +110,7 @@
uint8_t* getBuffer() const;
bool flush() const;
- // TODO(b/169672209): "const Memory& getMemory() const;"
- Memory getMemory() const;
+ const Memory& getMemory() const;
uint32_t getSize() const;
private:
diff --git a/common/include/ValidateHal.h b/common/include/ValidateHal.h
index 57ba079..86d9520 100644
--- a/common/include/ValidateHal.h
+++ b/common/include/ValidateHal.h
@@ -20,14 +20,14 @@
#include <set>
#include <tuple>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Validation.h>
#include "HalInterfaces.h"
-#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-enum class IOType { INPUT, OUTPUT };
-using PreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>;
+using HalPreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>;
// 1.3 HAL does not support control flow operations with operands of unknown size.
// See http://b/132458982#comment63.
@@ -81,7 +81,7 @@
const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
- std::set<PreparedModelRole>* preparedModelRoles,
+ std::set<HalPreparedModelRole>* preparedModelRoles,
V1_3::Operand* combinedOperand);
} // namespace nn
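The rename exists because nnapi/Validation.h, now included above, already declares a canonical PreparedModelRole keyed on the canonical IPreparedModel, so keeping the old name would collide. A standalone sketch of the two aliases side by side; both interface types below are empty stand-ins for illustration:

    #include <cstdint>
    #include <tuple>
    #include <type_traits>

    enum class IOType { INPUT, OUTPUT };

    struct IPreparedModel {};                     // canonical interface (stand-in)
    namespace V1_3 { struct IPreparedModel {}; }  // HIDL 1.3 interface (stand-in)

    // Canonical alias, as declared in nnapi/Validation.h:
    using PreparedModelRole = std::tuple<const IPreparedModel*, IOType, uint32_t>;
    // HAL-facing alias kept by ValidateHal.h after this change:
    using HalPreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>;

    static_assert(!std::is_same_v<PreparedModelRole, HalPreparedModelRole>,
                  "same shape, but keyed on different interface types");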
diff --git a/driver/sample/SampleDriver.cpp b/driver/sample/SampleDriver.cpp
index 61e2b8b..8b522e7 100644
--- a/driver/sample/SampleDriver.cpp
+++ b/driver/sample/SampleDriver.cpp
@@ -255,7 +255,7 @@
constexpr uint32_t kInvalidBufferToken = 0;
VLOG(DRIVER) << "SampleDriver::allocate";
- std::set<PreparedModelRole> roles;
+ std::set<HalPreparedModelRole> roles;
V1_3::Operand operand;
auto getModel = [](const sp<V1_3::IPreparedModel>& preparedModel) -> const V1_3::Model* {
const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel);
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index aaf2bbd..28eda3a 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -244,8 +244,8 @@
// region is used. We update the length here because the drivers are still expecting a real
// length. For other memories that do not allow this semantic, it is checked in
// MemoryValidatorBase::validate before reaching here.
- if (memory->getHidlMemory().valid() && offset == 0 && length == 0) {
- length = memory->getHidlMemory().size();
+ if (validate(memory->getMemory()).ok() && offset == 0 && length == 0) {
+ length = memory->getMemory().size;
}
// TODO validate the rest
uint32_t poolIndex = mMemories.add(memory);
@@ -322,8 +322,8 @@
// region is used. We update the length here because the drivers are still expecting a real
// length. For other memories that do not allow this semantic, it is checked in
// MemoryValidatorBase::validate before reaching here.
- if (memory->getHidlMemory().valid() && offset == 0 && length == 0) {
- length = memory->getHidlMemory().size();
+ if (validate(memory->getMemory()).ok() && offset == 0 && length == 0) {
+ length = memory->getMemory().size;
}
// TODO validate the rest
uint32_t poolIndex = mMemories.add(memory);
@@ -1466,7 +1466,7 @@
return {nAhwb, {}, {}};
}
if (isUsedAsInput[i]) {
- n = copyIBufferToHidlMemory(memory->getIBuffer(), blobAhwb->getHidlMemory());
+ n = copyIBufferToMemory(memory->getIBuffer(), blobAhwb->getMemory());
if (n != ANEURALNETWORKS_NO_ERROR) {
return {n, {}, {}};
}
@@ -1485,7 +1485,7 @@
for (uint32_t i = 0; i < memories.size(); i++) {
const RuntimeMemory* memory = mMemories[i];
if (memory->getIBuffer() != nullptr && isUsedAsOutput[i]) {
- n = copyHidlMemoryToIBuffer(memories[i]->getHidlMemory(), memory->getIBuffer(), {});
+ n = copyMemoryToIBuffer(memories[i]->getMemory(), memory->getIBuffer(), {});
if (n != ANEURALNETWORKS_NO_ERROR) {
return {n, {}, {}};
}
diff --git a/runtime/ExecutionBuilder.h b/runtime/ExecutionBuilder.h
index 1dbfce6..ca6d5fc 100644
--- a/runtime/ExecutionBuilder.h
+++ b/runtime/ExecutionBuilder.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_BUILDER_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_BUILDER_H
+#include <nnapi/Validation.h>
+
#include <atomic>
#include <memory>
#include <string>
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index d977878..fb36b3b 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -23,6 +23,7 @@
#include <cutils/native_handle.h>
#include <hidl/HidlTransportSupport.h>
#include <hidl/ServiceManagement.h>
+#include <nnapi/hal/1.3/Buffer.h>
#include <algorithm>
#include <functional>
@@ -256,14 +257,21 @@
CHECK(versionedPreparedModel != nullptr);
return versionedPreparedModel;
});
- auto [status, buffer, token] =
+ auto [status, hidlBuffer, token] =
kInterface->allocate(hidlDesc, preparedModels, desc.inputRoles, desc.outputRoles);
if (status != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "DriverDevice::allocate -- memory allocation on device " << getName()
<< " failed!";
return {convertErrorStatusToResultCode(status), nullptr};
}
- return MemoryFromDevice::create(std::move(buffer), token);
+ auto buffer =
+ V1_3::utils::Buffer::create(hidlBuffer, static_cast<Request::MemoryDomainToken>(token));
+ if (!buffer.has_value()) {
+ LOG(ERROR) << "DriverDevice::allocate -- memory allocation on device " << getName()
+ << " failed: " << buffer.error().message;
+ return {ANEURALNETWORKS_OP_FAILED, nullptr};
+ }
+ return MemoryFromDevice::create(std::move(buffer).value());
}
// Figures out how to place each of the input or outputs in a buffer. This just
@@ -356,7 +364,7 @@
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
- request.pools[i] = uncheckedConvert(localMemories[i]->getMemoryPool());
+ request.pools[i] = localMemories[i]->getMemoryPool();
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
@@ -467,7 +475,7 @@
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
- request.pools[i] = uncheckedConvert(localMemories[i]->getMemoryPool());
+ request.pools[i] = localMemories[i]->getMemoryPool();
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
diff --git a/runtime/Memory.cpp b/runtime/Memory.cpp
index 7efaf64..e3d2c24 100644
--- a/runtime/Memory.cpp
+++ b/runtime/Memory.cpp
@@ -21,6 +21,10 @@
#include <android-base/scopeguard.h>
#include <android/hardware_buffer.h>
#include <cutils/native_handle.h>
+#include <hidl/HidlSupport.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
#include <vndk/hardware_buffer.h>
#include <algorithm>
@@ -30,21 +34,15 @@
#include <utility>
#include <vector>
-#include <nnapi/TypeUtils.h>
-#include <nnapi/Types.h>
#include "CompilationBuilder.h"
#include "CpuExecutor.h"
#include "ExecutionBurstController.h"
#include "Manager.h"
-#include "MemoryUtils.h"
#include "TypeManager.h"
#include "Utils.h"
namespace android {
namespace nn {
-
-using ::android::hidl::memory::V1_0::IMemory;
-
namespace {
// The validator for a client-managed single-dimensional memory pool with a known size.
@@ -185,16 +183,14 @@
} // namespace
-RuntimeMemory::RuntimeMemory(hardware::hidl_memory memory)
- : kHidlMemory(std::move(memory)),
- mValidator(std::make_unique<SizedMemoryValidator>(kHidlMemory.size())) {}
+RuntimeMemory::RuntimeMemory(Memory memory)
+ : kMemory(std::move(memory)),
+ mValidator(std::make_unique<SizedMemoryValidator>(kMemory.size)) {}
-RuntimeMemory::RuntimeMemory(hardware::hidl_memory memory,
- std::unique_ptr<MemoryValidatorBase> validator)
- : kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {}
+RuntimeMemory::RuntimeMemory(Memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+ : kMemory(std::move(memory)), mValidator(std::move(validator)) {}
-RuntimeMemory::RuntimeMemory(sp<V1_3::IBuffer> buffer, uint32_t token)
- : kBuffer(std::move(buffer)), kToken(token) {}
+RuntimeMemory::RuntimeMemory(SharedBuffer buffer) : kBuffer(std::move(buffer)) {}
RuntimeMemory::~RuntimeMemory() {
for (const auto& [ptr, weakBurst] : mUsedBy) {
@@ -204,20 +200,17 @@
}
}
-V1_3::Request::MemoryPool RuntimeMemory::getMemoryPool() const {
- V1_3::Request::MemoryPool pool;
- if (kToken > 0) {
- pool.token(kToken);
- } else {
- pool.hidlMemory(kHidlMemory);
+Request::MemoryPool RuntimeMemory::getMemoryPool() const {
+ if (kBuffer != nullptr) {
+ return kBuffer->getToken();
}
- return pool;
+ return kMemory;
}
std::optional<RunTimePoolInfo> RuntimeMemory::getRunTimePoolInfo() const {
std::lock_guard<std::mutex> guard(mMutex);
if (!mHasCachedRunTimePoolInfo) {
- mCachedRunTimePoolInfo = RunTimePoolInfo::createFromMemory(uncheckedConvert(kHidlMemory));
+ mCachedRunTimePoolInfo = RunTimePoolInfo::createFromMemory(kMemory);
mHasCachedRunTimePoolInfo = true;
}
return mCachedRunTimePoolInfo;
@@ -249,33 +242,33 @@
return ANEURALNETWORKS_NO_ERROR;
}
-int copyIBufferToHidlMemory(const sp<V1_3::IBuffer>& src, const hardware::hidl_memory& dst) {
+int copyIBufferToMemory(const SharedBuffer& src, const Memory& dst) {
const auto ret = src->copyTo(dst);
- if (!ret.isOk()) {
- LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description();
- return ANEURALNETWORKS_OP_FAILED;
+ if (!ret.has_value()) {
+ LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.error().message;
+ return convertErrorStatusToResultCode(ret.error().code);
}
- return convertErrorStatusToResultCode(static_cast<V1_3::ErrorStatus>(ret));
+ return ANEURALNETWORKS_NO_ERROR;
}
-int copyHidlMemoryToIBuffer(const hardware::hidl_memory& src, const sp<V1_3::IBuffer>& dst,
- const std::vector<uint32_t>& dimensions) {
+int copyMemoryToIBuffer(const Memory& src, const SharedBuffer& dst,
+ const std::vector<uint32_t>& dimensions) {
const auto ret = dst->copyFrom(src, dimensions);
- if (!ret.isOk()) {
- LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description();
- return ANEURALNETWORKS_OP_FAILED;
+ if (!ret.has_value()) {
+ LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.error().message;
+ return convertErrorStatusToResultCode(ret.error().code);
}
- return convertErrorStatusToResultCode(static_cast<V1_3::ErrorStatus>(ret));
+ return ANEURALNETWORKS_NO_ERROR;
}
-static int copyIBuffers(const sp<V1_3::IBuffer>& src, const sp<V1_3::IBuffer>& dst,
+static int copyIBuffers(const SharedBuffer& src, const SharedBuffer& dst,
const MemoryValidatorBase::Metadata& srcMetadata) {
- const auto [n, memory] = MemoryRuntimeAHWB::create(srcMetadata.logicalSize);
+ const auto [n, memoryAHWB] = MemoryRuntimeAHWB::create(srcMetadata.logicalSize);
NN_RETURN_IF_ERROR(n);
- const hardware::hidl_memory& hidlMemory = memory->getHidlMemory();
- if (!hidlMemory.valid()) return ANEURALNETWORKS_OUT_OF_MEMORY;
- NN_RETURN_IF_ERROR(copyIBufferToHidlMemory(src, hidlMemory));
- NN_RETURN_IF_ERROR(copyHidlMemoryToIBuffer(hidlMemory, dst, srcMetadata.dimensions));
+ const Memory& memory = memoryAHWB->getMemory();
+ if (!validate(memory).ok()) return ANEURALNETWORKS_OUT_OF_MEMORY;
+ NN_RETURN_IF_ERROR(copyIBufferToMemory(src, memory));
+ NN_RETURN_IF_ERROR(copyMemoryToIBuffer(memory, dst, srcMetadata.dimensions));
return ANEURALNETWORKS_NO_ERROR;
}
@@ -293,19 +286,18 @@
return ANEURALNETWORKS_BAD_DATA;
}
- bool srcHasHidlMemory = src.getHidlMemory().valid();
- bool dstHasHidlMemory = dst.getHidlMemory().valid();
+ bool srcHasMemory = validate(src.getMemory()).ok();
+ bool dstHasMemory = validate(dst.getMemory()).ok();
bool srcHasIBuffer = src.getIBuffer() != nullptr;
bool dstHasIBuffer = dst.getIBuffer() != nullptr;
if (srcHasIBuffer && dstHasIBuffer) {
return copyIBuffers(src.getIBuffer(), dst.getIBuffer(), srcMetadata);
- } else if (srcHasHidlMemory && dstHasHidlMemory) {
+ } else if (srcHasMemory && dstHasMemory) {
return copyHidlMemories(src.getRunTimePoolInfo(), dst.getRunTimePoolInfo());
- } else if (srcHasHidlMemory && dstHasIBuffer) {
- return copyHidlMemoryToIBuffer(src.getHidlMemory(), dst.getIBuffer(),
- srcMetadata.dimensions);
- } else if (srcHasIBuffer && dstHasHidlMemory) {
- return copyIBufferToHidlMemory(src.getIBuffer(), dst.getHidlMemory());
+ } else if (srcHasMemory && dstHasIBuffer) {
+ return copyMemoryToIBuffer(src.getMemory(), dst.getIBuffer(), srcMetadata.dimensions);
+ } else if (srcHasIBuffer && dstHasMemory) {
+ return copyIBufferToMemory(src.getIBuffer(), dst.getMemory());
}
return ANEURALNETWORKS_OP_FAILED;
}
@@ -524,86 +516,56 @@
}
std::pair<int, std::unique_ptr<MemoryAshmem>> MemoryAshmem::create(uint32_t size) {
- hardware::hidl_memory hidlMemory = allocateSharedMemory(size);
- sp<IMemory> mapped = mapMemory(hidlMemory);
- if (mapped == nullptr || mapped->getPointer() == nullptr) {
- LOG(ERROR) << "RuntimeMemory::create failed";
- return {ANEURALNETWORKS_OUT_OF_MEMORY, nullptr};
+ auto memory = createSharedMemory(size);
+ if (!memory.has_value()) {
+ LOG(ERROR) << "RuntimeMemory::create() failed: " << memory.error().message;
+ return {convertErrorStatusToResultCode(memory.error().code), nullptr};
+ }
+ auto mapping = map(memory.value());
+ if (!mapping.has_value()) {
+ LOG(ERROR) << "RuntimeMemory::create() map failed: " << mapping.error().message;
+ return {convertErrorStatusToResultCode(mapping.error().code), nullptr};
}
return {ANEURALNETWORKS_NO_ERROR,
- std::make_unique<MemoryAshmem>(std::move(mapped), std::move(hidlMemory))};
+ std::make_unique<MemoryAshmem>(std::move(memory).value(), std::move(mapping).value())};
}
uint8_t* MemoryAshmem::getPointer() const {
- return static_cast<uint8_t*>(static_cast<void*>(kMappedMemory->getPointer()));
+ return static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
}
-MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hardware::hidl_memory memory)
- : RuntimeMemory(std::move(memory)), kMappedMemory(std::move(mapped)) {}
+MemoryAshmem::MemoryAshmem(Memory memory, Mapping mapping)
+ : RuntimeMemory(std::move(memory)), kMapping(std::move(mapping)) {}
std::pair<int, std::unique_ptr<MemoryFd>> MemoryFd::create(size_t size, int prot, int fd,
size_t offset) {
- if (size == 0 || fd < 0) {
- LOG(ERROR) << "Invalid size or fd";
- return {ANEURALNETWORKS_BAD_DATA, nullptr};
+ auto memory = createSharedMemoryFromFd(size, prot, fd, offset);
+ if (!memory.has_value()) {
+ LOG(ERROR) << "Failed to create memory from fd: " << memory.error().message;
+ return {convertErrorStatusToResultCode(memory.error().code), nullptr};
}
-
- // Duplicate the file descriptor so MemoryFd owns its own version.
- int dupfd = dup(fd);
- if (dupfd == -1) {
- LOG(ERROR) << "Failed to dup the fd";
- // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct
- // error to return here?
- return {ANEURALNETWORKS_UNEXPECTED_NULL, nullptr};
- }
-
- // Create a temporary native handle to own the dupfd.
- native_handle_t* nativeHandle = native_handle_create(1, 3);
- if (nativeHandle == nullptr) {
- LOG(ERROR) << "Failed to create native_handle";
- close(dupfd);
- // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct
- // error to return here?
- return {ANEURALNETWORKS_UNEXPECTED_NULL, nullptr};
- }
- nativeHandle->data[0] = dupfd;
- nativeHandle->data[1] = prot;
- const uint64_t bits = static_cast<uint64_t>(offset);
- nativeHandle->data[2] = (int32_t)(uint32_t)(bits & 0xffffffff);
- nativeHandle->data[3] = (int32_t)(uint32_t)(bits >> 32);
-
- // Create a hidl_handle which owns the native handle and fd so that we don't
- // have to manually clean either the native handle or the fd.
- hardware::hidl_handle hidlHandle;
- hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
-
- // Push the hidl_handle into a hidl_memory object. The hidl_memory object is
- // responsible for cleaning the hidl_handle, the native handle, and the fd.
- hardware::hidl_memory hidlMemory =
- hardware::hidl_memory("mmap_fd", std::move(hidlHandle), size);
-
- return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFd>(std::move(hidlMemory))};
+ return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFd>(std::move(memory).value())};
}
-MemoryFd::MemoryFd(hardware::hidl_memory memory) : RuntimeMemory(std::move(memory)) {}
+MemoryFd::MemoryFd(Memory memory) : RuntimeMemory(std::move(memory)) {}
std::pair<int, std::unique_ptr<MemoryAHWB>> MemoryAHWB::create(const AHardwareBuffer& ahwb) {
- AHardwareBuffer_Desc bufferDesc;
- AHardwareBuffer_describe(&ahwb, &bufferDesc);
- const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
- hardware::hidl_memory hidlMemory;
+ auto memory = createSharedMemoryFromAHWB(ahwb);
+ if (!memory.has_value()) {
+ LOG(ERROR) << "Failed to create memory from AHWB: " << memory.error().message;
+ return {convertErrorStatusToResultCode(memory.error().code), nullptr};
+ }
+
std::unique_ptr<MemoryValidatorBase> validator;
- if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
- hidlMemory = hardware::hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
- validator = std::make_unique<SizedMemoryValidator>(bufferDesc.width);
+ if (memory.value().name == "hardware_buffer_blob") {
+ validator = std::make_unique<SizedMemoryValidator>(memory.value().size);
} else {
- // memory size is not used.
- hidlMemory = hardware::hidl_memory("hardware_buffer", handle, 0);
validator = std::make_unique<AHardwareBufferNonBlobValidator>();
}
- auto memory = std::make_unique<MemoryAHWB>(std::move(hidlMemory), std::move(validator));
- return {ANEURALNETWORKS_NO_ERROR, std::move(memory)};
-};
+
+ auto memoryAHWB = std::make_unique<MemoryAHWB>(std::move(memory).value(), std::move(validator));
+ return {ANEURALNETWORKS_NO_ERROR, std::move(memoryAHWB)};
+}
std::pair<int, std::unique_ptr<MemoryRuntimeAHWB>> MemoryRuntimeAHWB::create(uint32_t size) {
AHardwareBuffer* ahwb = nullptr;
@@ -621,58 +583,43 @@
LOG(ERROR) << "Failed to allocate BLOB mode AHWB.";
return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
- auto allocateGuard = base::make_scope_guard([&ahwb]() { AHardwareBuffer_release(ahwb); });
+ auto ahwbGuard = base::make_scope_guard([ahwb]() { AHardwareBuffer_release(ahwb); });
- void* buffer = nullptr;
- err = AHardwareBuffer_lock(ahwb, usage, -1, nullptr, &buffer);
- if (err != 0 || buffer == nullptr) {
- LOG(ERROR) << "Failed to lock BLOB mode AHWB.";
- return {ANEURALNETWORKS_OP_FAILED, nullptr};
+ auto memory = createSharedMemoryFromAHWB(*ahwb);
+ if (!memory.has_value()) {
+ LOG(ERROR) << "Failed to allocate BLOB mode AHWB: " << memory.error().message;
+ return {convertErrorStatusToResultCode(memory.error().code), nullptr};
}
- auto lockGuard = base::make_scope_guard([&ahwb]() { AHardwareBuffer_unlock(ahwb, nullptr); });
-
- const native_handle_t* handle = AHardwareBuffer_getNativeHandle(ahwb);
- if (handle == nullptr) {
- LOG(ERROR) << "Failed to retrieve the native handle from the AHWB.";
- return {ANEURALNETWORKS_OP_FAILED, nullptr};
+ auto mapping = map(memory.value());
+ if (!mapping.has_value()) {
+ LOG(ERROR) << "Failed to map BLOB mode AHWB: " << mapping.error().message;
+ return {convertErrorStatusToResultCode(mapping.error().code), nullptr};
}
-
- hardware::hidl_memory hidlMemory =
- hardware::hidl_memory("hardware_buffer_blob", handle, desc.width);
- auto memory = std::make_unique<MemoryRuntimeAHWB>(std::move(hidlMemory), ahwb,
- static_cast<uint8_t*>(buffer));
- allocateGuard.Disable();
- lockGuard.Disable();
- return {ANEURALNETWORKS_NO_ERROR, std::move(memory)};
+ auto memoryAHWB = std::make_unique<MemoryRuntimeAHWB>(
+ std::move(memory).value(), std::move(ahwbGuard), std::move(mapping).value());
+ return {ANEURALNETWORKS_NO_ERROR, std::move(memoryAHWB)};
}
-MemoryRuntimeAHWB::MemoryRuntimeAHWB(hardware::hidl_memory memory, AHardwareBuffer* ahwb,
- uint8_t* buffer)
- : RuntimeMemory(std::move(memory)), mAhwb(ahwb), mBuffer(buffer) {
- CHECK(mAhwb != nullptr);
- CHECK(mBuffer != nullptr);
+uint8_t* MemoryRuntimeAHWB::getPointer() const {
+ return static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
}
-MemoryRuntimeAHWB::~MemoryRuntimeAHWB() {
- AHardwareBuffer_unlock(mAhwb, nullptr);
- AHardwareBuffer_release(mAhwb);
-}
+MemoryRuntimeAHWB::MemoryRuntimeAHWB(Memory memory,
+ base::ScopeGuard<std::function<void()>> ahwbScopeGuard,
+ Mapping mapping)
+ : RuntimeMemory(std::move(memory)),
+ kAhwbScopeGuard(std::move(ahwbScopeGuard)),
+ kMapping(std::move(mapping)) {}
-std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<V1_3::IBuffer> buffer,
- uint32_t token) {
+std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(SharedBuffer buffer) {
if (buffer == nullptr) {
LOG(ERROR) << "nullptr IBuffer for device memory.";
return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
- if (token <= 0) {
- LOG(ERROR) << "Invalid token for device memory: " << token;
- return {ANEURALNETWORKS_OP_FAILED, nullptr};
- }
- return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)};
-};
+ return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer))};
+}
-MemoryFromDevice::MemoryFromDevice(sp<V1_3::IBuffer> buffer, uint32_t token)
- : RuntimeMemory(std::move(buffer), token) {}
+MemoryFromDevice::MemoryFromDevice(SharedBuffer buffer) : RuntimeMemory(std::move(buffer)) {}
} // namespace nn
} // namespace android
diff --git a/runtime/Memory.h b/runtime/Memory.h
index f78ef80..b69fbed 100644
--- a/runtime/Memory.h
+++ b/runtime/Memory.h
@@ -18,6 +18,10 @@
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MEMORY_H
#include <android-base/macros.h>
+#include <android-base/scopeguard.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/Validation.h>
#include <sys/mman.h>
#include <vndk/hardware_buffer.h>
@@ -32,15 +36,12 @@
#include <vector>
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "Utils.h"
namespace android {
namespace nn {
-using ::android::hidl::memory::V1_0::IMemory;
-
class CompilationBuilder;
class Device;
class ExecutionBurstController;
@@ -160,10 +161,10 @@
virtual bool isInitialized() const { return true; }
};
-int copyIBufferToHidlMemory(const sp<V1_3::IBuffer>& src, const hardware::hidl_memory& dst);
+int copyIBufferToMemory(const SharedBuffer& src, const Memory& dst);
-int copyHidlMemoryToIBuffer(const hardware::hidl_memory& src, const sp<V1_3::IBuffer>& dst,
- const std::vector<uint32_t>& dimensions);
+int copyMemoryToIBuffer(const Memory& src, const SharedBuffer& dst,
+ const std::vector<uint32_t>& dimensions);
// Represents a memory region.
class RuntimeMemory {
@@ -175,10 +176,10 @@
// this memory that it is being freed.
virtual ~RuntimeMemory();
- V1_3::Request::MemoryPool getMemoryPool() const;
- const hardware::hidl_memory& getHidlMemory() const { return kHidlMemory; }
- const sp<V1_3::IBuffer>& getIBuffer() const { return kBuffer; }
- virtual uint32_t getSize() const { return getHidlMemory().size(); }
+ Request::MemoryPool getMemoryPool() const;
+ const Memory& getMemory() const { return kMemory; }
+ const SharedBuffer& getIBuffer() const { return kBuffer; }
+ virtual uint32_t getSize() const { return getMemory().size; }
virtual std::optional<RunTimePoolInfo> getRunTimePoolInfo() const;
MemoryValidatorBase& getValidator() const {
@@ -201,15 +202,14 @@
static int copy(const RuntimeMemory& src, const RuntimeMemory& dst);
protected:
- RuntimeMemory(hardware::hidl_memory memory);
- RuntimeMemory(hardware::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
- RuntimeMemory(sp<V1_3::IBuffer> buffer, uint32_t token);
+ RuntimeMemory(Memory memory);
+ RuntimeMemory(Memory memory, std::unique_ptr<MemoryValidatorBase> validator);
+ RuntimeMemory(SharedBuffer buffer);
- // The HIDL representation for this memory. We will use one of the following values
- // when communicating with the drivers.
- const hardware::hidl_memory kHidlMemory;
- const sp<V1_3::IBuffer> kBuffer;
- const uint32_t kToken = 0;
+ // The canonical representation for this memory. We will use one of the
+ // following values when communicating with the drivers.
+ const Memory kMemory;
+ const SharedBuffer kBuffer;
std::unique_ptr<MemoryValidatorBase> mValidator;
@@ -290,21 +290,20 @@
uint8_t* getPointer() const;
std::optional<RunTimePoolInfo> getRunTimePoolInfo() const override {
- return RunTimePoolInfo::createFromExistingBuffer(getPointer(), kHidlMemory.size());
+ return RunTimePoolInfo::createFromExistingBuffer(getPointer(), kMemory.size);
}
// prefer using MemoryAshmem::create
- MemoryAshmem(sp<IMemory> mapped, hardware::hidl_memory memory);
+ MemoryAshmem(Memory memory, Mapping mapping);
private:
- const sp<IMemory> kMappedMemory;
+ const Mapping kMapping;
};
class MemoryFd : public RuntimeMemory {
public:
- // Create a memory object based on input size, prot, and fd that can be sent
- // across HIDL. This function duplicates the provided fd, and owns the
- // duplicate.
+ // Create a memory object based on input size, prot, and fd. This function
+ // duplicates the provided fd, and owns the duplicate.
//
// On success, returns ANEURALNETWORKS_NO_ERROR and a memory object.
// On error, returns the appropriate NNAPI error code and nullptr.
@@ -312,7 +311,7 @@
size_t offset);
// prefer using MemoryFd::create
- MemoryFd(hardware::hidl_memory memory);
+ MemoryFd(Memory memory);
};
class MemoryAHWB : public RuntimeMemory {
@@ -325,7 +324,7 @@
static std::pair<int, std::unique_ptr<MemoryAHWB>> create(const AHardwareBuffer& ahwb);
// prefer using MemoryAHWB::create
- MemoryAHWB(hardware::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+ MemoryAHWB(Memory memory, std::unique_ptr<MemoryValidatorBase> validator)
: RuntimeMemory(std::move(memory), std::move(validator)) {}
};
@@ -342,19 +341,19 @@
// Get a pointer to the content of the memory. The returned pointer is
// valid for the lifetime of the MemoryRuntimeAHWB object. This call always
// returns non-null because it was validated during MemoryRuntimeAHWB::create.
- uint8_t* getPointer() const { return mBuffer; }
+ uint8_t* getPointer() const;
std::optional<RunTimePoolInfo> getRunTimePoolInfo() const override {
- return RunTimePoolInfo::createFromExistingBuffer(getPointer(), kHidlMemory.size());
+ return RunTimePoolInfo::createFromExistingBuffer(getPointer(), kMemory.size);
}
// prefer using MemoryRuntimeAHWB::create
- MemoryRuntimeAHWB(hardware::hidl_memory memory, AHardwareBuffer* ahwb, uint8_t* buffer);
- ~MemoryRuntimeAHWB();
+ MemoryRuntimeAHWB(Memory memory, base::ScopeGuard<std::function<void()>> ahwbScopeGuard,
+ Mapping mapping);
private:
- AHardwareBuffer* const mAhwb;
- uint8_t* const mBuffer;
+ const base::ScopeGuard<std::function<void()>> kAhwbScopeGuard;
+ const Mapping kMapping;
};
class MemoryFromDevice : public RuntimeMemory {
@@ -364,11 +363,10 @@
//
// On success, returns ANEURALNETWORKS_NO_ERROR and a memory object.
// On error, returns the appropriate NNAPI error code and nullptr.
- static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<V1_3::IBuffer> buffer,
- uint32_t token);
+ static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(SharedBuffer buffer);
// prefer using MemoryFromDevice::create
- MemoryFromDevice(sp<V1_3::IBuffer> buffer, uint32_t token);
+ MemoryFromDevice(SharedBuffer buffer);
};
using MemoryTracker = ObjectTracker<RuntimeMemory>;
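RuntimeMemory::getMemoryPool() above returns the canonical Request::MemoryPool, which is variant-like: device-allocated memory is referenced by its domain token, anything else by the Memory itself. A simplified standalone model of that dispatch; the stand-in types and the two-alternative variant are assumptions for illustration, and the real MemoryPool in nnapi/Types.h has more alternatives:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <variant>

    // Simplified stand-ins for the canonical types.
    struct Memory {
        size_t size = 0;
    };
    using MemoryDomainToken = uint32_t;
    struct Buffer {
        MemoryDomainToken getToken() const { return kToken; }
        MemoryDomainToken kToken = 1;
    };
    using SharedBuffer = std::shared_ptr<const Buffer>;
    using MemoryPool = std::variant<Memory, MemoryDomainToken>;

    // Mirrors the dispatch in RuntimeMemory::getMemoryPool() (runtime/Memory.cpp).
    MemoryPool getMemoryPool(const Memory& kMemory, const SharedBuffer& kBuffer) {
        if (kBuffer != nullptr) {
            return kBuffer->getToken();  // device memory: referenced by token
        }
        return kMemory;  // client memory: pass the canonical Memory itself
    }

    int main() {
        const Memory memory{.size = 1024};
        const auto pool = getMemoryPool(memory, nullptr);
        return std::holds_alternative<Memory>(pool) ? 0 : 1;
    }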
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
index 0c506d5..51a626f 100644
--- a/runtime/ModelBuilder.cpp
+++ b/runtime/ModelBuilder.cpp
@@ -900,7 +900,7 @@
model.operandValues = std::move(mOperandValues);
model.pools.resize(mMemories.size());
std::transform(mMemories.begin(), mMemories.end(), model.pools.begin(),
- [](const RuntimeMemory* m) { return uncheckedConvert(m->getHidlMemory()); });
+ [](const RuntimeMemory* m) { return m->getMemory(); });
model.relaxComputationFloat32toFloat16 = mainModel->mRelaxComputationFloat32toFloat16;
model.extensionNameToPrefix = std::move(mExtensionNameToPrefix);
return model;
diff --git a/runtime/test/TestCompliance.cpp b/runtime/test/TestCompliance.cpp
index 299eebc..2d2dcd6 100644
--- a/runtime/test/TestCompliance.cpp
+++ b/runtime/test/TestCompliance.cpp
@@ -171,7 +171,8 @@
TEST_F(ComplianceTest, HardwareBufferRequest) {
const auto [n, ahwb] = MemoryRuntimeAHWB::create(1024);
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- V1_3::Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
+ V1_3::Request::MemoryPool sharedMemoryPool,
+ ahwbMemoryPool = convertToV1_3(ahwb->getMemoryPool());
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
ASSERT_TRUE(ahwbMemoryPool.hidlMemory().valid());
diff --git a/runtime/test/TestMemoryDomain.cpp b/runtime/test/TestMemoryDomain.cpp
index 35a826a..1654299 100644
--- a/runtime/test/TestMemoryDomain.cpp
+++ b/runtime/test/TestMemoryDomain.cpp
@@ -305,12 +305,12 @@
const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
- const auto& hidlMemory = m->getHidlMemory();
- EXPECT_TRUE(hidlMemory.valid());
+ const auto& nnMemory = m->getMemory();
+ EXPECT_TRUE(validate(nnMemory).ok());
if (kUseV1_2Driver) {
- EXPECT_EQ(hidlMemory.name(), "ashmem");
+ EXPECT_EQ(nnMemory.name, "ashmem");
} else {
- EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ EXPECT_EQ(nnMemory.name, "hardware_buffer_blob");
}
}
}
@@ -343,12 +343,12 @@
const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
- const auto& hidlMemory = m->getHidlMemory();
- EXPECT_TRUE(hidlMemory.valid());
+ const auto& nnMemory = m->getMemory();
+ EXPECT_TRUE(validate(nnMemory).ok());
if (kUseV1_2Driver) {
- EXPECT_EQ(hidlMemory.name(), "ashmem");
+ EXPECT_EQ(nnMemory.name, "ashmem");
} else {
- EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ EXPECT_EQ(nnMemory.name, "hardware_buffer_blob");
}
}
}
@@ -367,12 +367,12 @@
const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
- const auto& hidlMemory = m->getHidlMemory();
- EXPECT_TRUE(hidlMemory.valid());
+ const auto& nnMemory = m->getMemory();
+ EXPECT_TRUE(validate(nnMemory).ok());
if (kUseV1_2Driver) {
- EXPECT_EQ(hidlMemory.name(), "ashmem");
+ EXPECT_EQ(nnMemory.name, "ashmem");
} else {
- EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ EXPECT_EQ(nnMemory.name, "hardware_buffer_blob");
}
}
}
@@ -390,12 +390,12 @@
const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
- const auto& hidlMemory = m->getHidlMemory();
- EXPECT_TRUE(hidlMemory.valid());
+ const auto& nnMemory = m->getMemory();
+ EXPECT_TRUE(validate(nnMemory).ok());
if (kUseV1_2Driver) {
- EXPECT_EQ(hidlMemory.name(), "ashmem");
+ EXPECT_EQ(nnMemory.name, "ashmem");
} else {
- EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ EXPECT_EQ(nnMemory.name, "hardware_buffer_blob");
}
}
}