/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Memory"

#include "Memory.h"

#include <algorithm>
#include <memory>
#include <set>
#include <tuple>
#include <utility>
#include <vector>

#include "CompilationBuilder.h"
#include "ExecutionBurstController.h"
#include "Manager.h"
#include "MemoryUtils.h"
#include "TypeManager.h"
#include "Utils.h"

namespace android {
namespace nn {

using namespace hal;

namespace {

// The validator for a client-managed single-dimensional memory pool with a known size.
// The memory may be used for request inputs, request outputs, or model constants.
class SizedMemoryValidator : public MemoryValidatorBase {
   public:
    explicit SizedMemoryValidator(uint32_t size) : kSize(size) {}

    bool validate(const CompilationBuilder*, IOType, uint32_t, const ANeuralNetworksOperandType*,
                  uint32_t offset, uint32_t length) const override {
        // Written to avoid unsigned overflow of "offset + length".
        NN_RET_CHECK(offset <= kSize && length <= kSize - offset)
                << "request size larger than the memory size.";
        NN_RET_CHECK(offset != 0 || length != 0) << "memory size cannot be implied.";
        return true;
    }

   private:
    const uint32_t kSize;
};

// The validator for an AHardwareBuffer with a non-BLOB format.
// We require that the memory is only used for request inputs or request outputs,
// with both offset and length set to zero.
class AHardwareBufferNonBlobValidator : public MemoryValidatorBase {
   public:
    AHardwareBufferNonBlobValidator() = default;

    bool validate(const CompilationBuilder* compilation, IOType, uint32_t,
                  const ANeuralNetworksOperandType*, uint32_t offset,
                  uint32_t length) const override {
        NN_RET_CHECK(compilation != nullptr)
                << "cannot use non-BLOB AHardwareBuffer as model constant";
        NN_RET_CHECK(offset == 0 && length == 0)
                << "non-zero offset (" << offset << ") and/or length (" << length
                << ") for non-BLOB format AHardwareBuffer.";
        return true;
    }
};

// The validator for a memory created from ANeuralNetworksMemory_createFromDesc.
// We require that the memory is only used in one of the roles pre-specified on the
// descriptor, with both offset and length set to zero.
class DeviceMemoryValidator : public MemoryValidatorBase {
   public:
    DeviceMemoryValidator(std::set<CompilationRole> roles, hal::OperandType type,
                          std::vector<uint32_t> dimensions)
        : kCompilationRoles(std::move(roles)),
          mDataType(type),
          kInitialDimensions(std::move(dimensions)),
          mUpdatedDimensions(kInitialDimensions) {}

    bool validate(const CompilationBuilder* compilation, IOType ioType, uint32_t index,
                  const ANeuralNetworksOperandType* type, uint32_t offset,
                  uint32_t length) const override {
        NN_RET_CHECK(kCompilationRoles.count({compilation, ioType, index}) > 0)
                << "invalid compilation role.";
        NN_RET_CHECK(offset == 0 && length == 0)
                << "non-zero offset and/or length for driver-allocated memory.";
        if (type) {
            const bool isTensor = TypeManager::get()->isTensorType(mDataType);
            NN_RET_CHECK(isTensor || type->dimensionCount == 0)
                    << "invalid dimensions for scalar memory.";
            std::vector<uint32_t> dimensions(type->dimensions,
                                             type->dimensions + type->dimensionCount);
            // We only check against kInitialDimensions here.
            // For input memories, mUpdatedDimensions will be checked in validateInputDimensions
            // at the beginning of a computation.
            const auto combined = combineDimensions(dimensions, kInitialDimensions);
            NN_RET_CHECK(combined.has_value())
                    << "incompatible dimensions between request and memory. (request: "
                    << toString(dimensions) << ", memory: " << toString(kInitialDimensions) << ")";
        }
        return true;
    }

    bool validateInputDimensions(const std::vector<uint32_t>& dimensions) const override {
        NN_RET_CHECK(mInitialized) << "using an uninitialized memory as input";
        NN_RET_CHECK(dimensions == mUpdatedDimensions)
                << "incompatible input dimensions between request and memory. (request: "
                << toString(dimensions) << ", memory: " << toString(mUpdatedDimensions) << ")";
        return true;
    }

    bool updateDimensions(const std::vector<uint32_t>& dimensions) override {
        NN_RET_CHECK(TypeManager::get()->isTensorType(mDataType) || dimensions.empty());
        auto combined = combineDimensions(dimensions, kInitialDimensions);
        NN_RET_CHECK(combined.has_value());
        mUpdatedDimensions = std::move(combined.value());
        return true;
    }

    void setInitialized(bool initialized) override { mInitialized = initialized; }

   private:
    const std::set<CompilationRole> kCompilationRoles;
    OperandType mDataType;

    // The dimensions of the memory when the memory object is created.
    // May have unknown dimensions or rank.
    const std::vector<uint32_t> kInitialDimensions;

    // The updated dimensions after a successful execution or memory copying.
    std::vector<uint32_t> mUpdatedDimensions;

    bool mInitialized = false;
};

}  // namespace

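// A Memory object either wraps a hidl_memory region (shared memory sent to drivers
// inline in a Request) or, for driver-managed memory, an IBuffer together with a
// positive token issued by the allocating device.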
Memory::Memory(hal::hidl_memory memory)
    : kHidlMemory(std::move(memory)),
      mValidator(std::make_unique<SizedMemoryValidator>(kHidlMemory.size())) {}

Memory::Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
    : kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {}

Memory::Memory(sp<hal::IBuffer> buffer, int32_t token)
    : kBuffer(std::move(buffer)), kToken(token) {}

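// On destruction, notify any ExecutionBurstController objects that have cached this
// memory so that they drop their now-stale entry, keyed by getKey().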
Memory::~Memory() {
    for (const auto [ptr, weakBurst] : mUsedBy) {
        if (const std::shared_ptr<ExecutionBurstController> burst = weakBurst.lock()) {
            burst->freeMemory(getKey());
        }
    }
}

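// Builds the hal::Request::MemoryPool union for this memory: a positive token
// identifies driver-allocated memory; otherwise the hidl_memory region itself is sent.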
hal::Request::MemoryPool Memory::getMemoryPool() const {
    hal::Request::MemoryPool pool;
    if (kToken > 0) {
        pool.token(kToken);
    } else {
        pool.hidlMemory(kHidlMemory);
    }
    return pool;
}

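// The key is this object's address, which is stable and unique for the object's
// lifetime, so bursts can use it to identify cached entries.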
intptr_t Memory::getKey() const {
    return reinterpret_cast<intptr_t>(this);
}

void Memory::usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const {
    std::lock_guard<std::mutex> guard(mMutex);
    mUsedBy.emplace(burst.get(), burst);
}

bool MemoryBuilder::badState(const char* name) const {
    if (mFinished) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << name
                   << " cannot modify a finished descriptor";
        return true;
    }
    return false;
}

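// Records one usage of this memory as an input or output of a compilation. A hedged
// sketch of how this is reached from the NDK API (assumes "compilation" is an
// already-finished ANeuralNetworksCompilation):
//
//   ANeuralNetworksMemoryDesc* desc = nullptr;
//   ANeuralNetworksMemoryDesc_create(&desc);
//   ANeuralNetworksMemoryDesc_addInputRole(desc, compilation, /*index=*/0,
//                                          /*frequency=*/1.0f);
//   ANeuralNetworksMemoryDesc_finish(desc);
//   ANeuralNetworksMemory* memory = nullptr;
//   ANeuralNetworksMemory_createFromDesc(desc, &memory);
//   ANeuralNetworksMemoryDesc_free(desc);
//
// Each addInputRole/addOutputRole call lands here with the corresponding IOType.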
int MemoryBuilder::addRole(const CompilationBuilder& compilation, IOType ioType, uint32_t index,
                           float freq) {
    const char* tag = ioType == IOType::INPUT ? "addInputRole" : "addOutputRole";
    if (badState(tag)) {
        return ANEURALNETWORKS_BAD_STATE;
    }
    if (mRoles.count({&compilation, ioType, index}) > 0) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag
                   << " -- the same operand is specified twice.";
        return ANEURALNETWORKS_BAD_DATA;
    }

    std::vector<std::tuple<const PreparedModel*, IOType, uint32_t>> roles;
    auto callback = [&roles](const auto* preparedModel, IOType type, uint32_t index) {
        roles.emplace_back(preparedModel, type, index);
    };
    if (ioType == IOType::INPUT) {
        if (compilation.forEachStepRoleOfInput(index, callback) != ANEURALNETWORKS_NO_ERROR) {
            return ANEURALNETWORKS_BAD_DATA;
        }
    } else {
        if (compilation.forEachStepRoleOfOutput(index, callback) != ANEURALNETWORKS_NO_ERROR) {
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    const ModelBuilder* model = compilation.getModel();
    CHECK(model != nullptr);
    Operand operand;
    if (ioType == IOType::INPUT) {
        if (index >= model->inputCount()) {
            LOG(ERROR) << "ANeuralNetworksMemoryDesc_addInputRole -- input index out of range.";
            return ANEURALNETWORKS_BAD_DATA;
        }
        operand = model->getInputOperand(index);
    } else {
        if (index >= model->outputCount()) {
            LOG(ERROR) << "ANeuralNetworksMemoryDesc_addOutputRole -- output index out of range.";
            return ANEURALNETWORKS_BAD_DATA;
        }
        operand = model->getOutputOperand(index);
    }
    if (mOperand.has_value()) {
        if (operand.type != mOperand->type || operand.scale != mOperand->scale ||
            operand.zeroPoint != mOperand->zeroPoint ||
            operand.extraParams != mOperand->extraParams) {
            LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag
                       << " -- incompatible operand metadata.";
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    if (!TypeManager::get()->isTensorType(operand.type) && !mDesc.dimensions.empty()) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag << " -- incompatible dimensions.";
        return ANEURALNETWORKS_BAD_DATA;
    }
    auto combined = combineDimensions(mDesc.dimensions, operand.dimensions);
    if (!combined.has_value()) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag << " -- incompatible dimensions.";
        return ANEURALNETWORKS_BAD_DATA;
    }

    if (freq > 1.0f || freq <= 0.0f) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag << " -- invalid frequency " << freq;
        return ANEURALNETWORKS_BAD_DATA;
    }

    mRoles.emplace(&compilation, ioType, index);
    for (const auto [preparedModel, type, ind] : roles) {
        uint32_t modelIndex = mDesc.preparedModels.add(preparedModel);
        BufferRole role = {.modelIndex = modelIndex, .ioIndex = ind, .frequency = freq};
        if (type == IOType::INPUT) {
            mDesc.inputRoles.push_back(role);
        } else {
            mDesc.outputRoles.push_back(role);
        }
    }
    mOperand = std::move(operand);
    mDesc.dimensions = std::move(combined.value());
    return ANEURALNETWORKS_NO_ERROR;
}

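// Merges the caller-provided dimensions into those accumulated so far from the added
// roles; a dimension of 0 means unspecified and may be filled in by either side, while
// fully specified dimensions must agree.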
int MemoryBuilder::setDimensions(const std::vector<uint32_t>& dimensions) {
    if (badState("setDimensions")) return ANEURALNETWORKS_BAD_STATE;
    if (mOperand.has_value() && !TypeManager::get()->isTensorType(mOperand->type) &&
        !dimensions.empty()) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_setDimensions -- incompatible dimensions for "
                      "scalars.";
        return ANEURALNETWORKS_BAD_DATA;
    }
    auto combined = combineDimensions(mDesc.dimensions, dimensions);
    if (!combined.has_value()) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_setDimensions -- incompatible dimensions.";
        return ANEURALNETWORKS_BAD_DATA;
    }
    mDesc.dimensions = std::move(combined.value());
    return ANEURALNETWORKS_NO_ERROR;
}

static void logMemoryDescriptorToInfo(const MemoryDescriptor& desc, const Operand& operand) {
    LOG(INFO) << "MemoryDescriptor start";
    LOG(INFO) << "    Data type: " << toString(operand.type);
    LOG(INFO) << "    Scale: " << toString(operand.scale);
    LOG(INFO) << "    Zero point: " << toString(operand.zeroPoint);
    LOG(INFO) << "    Extra params: " << toString(operand.extraParams);
    LOG(INFO) << "    Dimensions: " << toString(desc.dimensions);
    LOG(INFO) << "    Submodels [" << desc.preparedModels.size() << "]:";
    for (const auto* preparedModel : desc.preparedModels) {
        LOG(INFO) << "        service = " << preparedModel->getDevice()->getName();
    }
    LOG(INFO) << "    Input roles [" << desc.inputRoles.size() << "]:";
    for (const auto& usage : desc.inputRoles) {
        LOG(INFO) << "        " << toString(usage);
    }
    LOG(INFO) << "    Output roles [" << desc.outputRoles.size() << "]:";
    for (const auto& usage : desc.outputRoles) {
        LOG(INFO) << "        " << toString(usage);
    }
    LOG(INFO) << "MemoryDescriptor end";
}

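// Chooses the device that will allocate the memory. Allocation is delegated to a
// single driver, so all roles must resolve to prepared models from the same device;
// otherwise no allocator is selected and the caller falls back to ashmem.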
static const Device* selectDeviceMemoryAllocator(const MemoryDescriptor& desc) {
    const Device* allocator = nullptr;
    for (const auto* preparedModel : desc.preparedModels) {
        const auto* device = preparedModel->getDevice();
        if (allocator == nullptr) {
            allocator = device;
        } else if (allocator != device) {
            LOG(INFO) << "selectDeviceMemoryAllocator -- cannot handle multiple devices.";
            return nullptr;
        }
    }
    CHECK(allocator != nullptr);
    VLOG(MEMORY) << "Using " << allocator->getName() << " as allocator.";
    return allocator;
}

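// Seals the descriptor: requires at least one role to have been added, optionally
// logs the descriptor, picks the allocator device, and blocks further modification.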
int MemoryBuilder::finish() {
    if (badState("finish")) return ANEURALNETWORKS_BAD_STATE;
    if (mRoles.empty()) {
        LOG(ERROR) << "ANeuralNetworksMemoryDesc_finish -- no role has been specified.";
        return ANEURALNETWORKS_BAD_DATA;
    }
    CHECK(mOperand.has_value());
    if (VLOG_IS_ON(MEMORY)) {
        logMemoryDescriptorToInfo(mDesc, mOperand.value());
    }
    mAllocator = selectDeviceMemoryAllocator(mDesc);
    mFinished = true;
    return ANEURALNETWORKS_NO_ERROR;
}

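// Allocates the memory described by a finished descriptor: first on the selected
// device (if any), then falling back to an ashmem region of the computed size. The
// returned Memory carries a DeviceMemoryValidator that restricts its use to the
// recorded roles.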
std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const {
    if (!mFinished) {
        LOG(ERROR) << "ANeuralNetworksMemory_createFromDesc -- passed an unfinished descriptor";
        return {ANEURALNETWORKS_BAD_STATE, nullptr};
    }

    // TODO(xusongw): Does not support dynamic output shape for now.
    CHECK(mOperand.has_value());
    uint32_t size = TypeManager::get()->getSizeOfData(mOperand->type, mDesc.dimensions);
    if (size == 0) {
        LOG(ERROR)
                << "ANeuralNetworksMemory_createFromDesc -- does not support unknown dimensions.";
        return {ANEURALNETWORKS_OP_FAILED, nullptr};
    }

    int n = ANEURALNETWORKS_OP_FAILED;
    std::unique_ptr<Memory> memory;

    // Try allocating the memory on a device.
    if (mAllocator != nullptr) {
        std::tie(n, memory) = mAllocator->allocate(mDesc);
    }

    // If that fails, fall back to ashmem.
    // TODO(xusongw): Decide on the fallback strategy.
    // TODO(xusongw): Use BLOB mode hardware buffer when possible.
    if (n != ANEURALNETWORKS_NO_ERROR) {
        VLOG(MEMORY) << "MemoryBuilder::allocate -- falling back to ashmem.";
        std::tie(n, memory) = MemoryAshmem::create(size);
    }

    if (n == ANEURALNETWORKS_NO_ERROR) {
        CHECK(memory != nullptr);
        auto validator =
                std::make_unique<DeviceMemoryValidator>(mRoles, mOperand->type, mDesc.dimensions);
        memory->setValidator(std::move(validator));
    }
    return {n, std::move(memory)};
}

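// Creates an ashmem region of the requested size and maps it into this process.
// The returned object owns both the mapping and the underlying hidl_memory.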
std::pair<int, std::unique_ptr<MemoryAshmem>> MemoryAshmem::create(uint32_t size) {
    hidl_memory hidlMemory = allocateSharedMemory(size);
    sp<IMemory> mapped = mapMemory(hidlMemory);
    if (mapped == nullptr || mapped->getPointer() == nullptr) {
        LOG(ERROR) << "MemoryAshmem::create failed";
        return {ANEURALNETWORKS_OUT_OF_MEMORY, nullptr};
    }
    return {ANEURALNETWORKS_NO_ERROR,
            std::make_unique<MemoryAshmem>(std::move(mapped), std::move(hidlMemory))};
}

uint8_t* MemoryAshmem::getPointer() const {
    return static_cast<uint8_t*>(static_cast<void*>(kMappedMemory->getPointer()));
}

MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hidl_memory memory)
    : Memory(std::move(memory)), kMappedMemory(std::move(mapped)) {}

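// Wraps an existing file descriptor in an "mmap_fd" hidl_memory. The fd is dup()ed,
// so the caller retains ownership of the original. A hedged usage sketch (assumes a
// readable file of at least `size` bytes; the path is hypothetical):
//
//   int fd = open("/data/local/tmp/weights.bin", O_RDONLY);
//   auto [n, memory] = MemoryFd::create(size, PROT_READ, fd, /*offset=*/0);
//   close(fd);  // safe: MemoryFd owns its own duplicate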
std::pair<int, std::unique_ptr<MemoryFd>> MemoryFd::create(size_t size, int prot, int fd,
                                                           size_t offset) {
    if (size == 0 || fd < 0) {
        LOG(ERROR) << "Invalid size or fd";
        return {ANEURALNETWORKS_BAD_DATA, nullptr};
    }

    // Duplicate the file descriptor so MemoryFd owns its own version.
    int dupfd = dup(fd);
    if (dupfd == -1) {
        LOG(ERROR) << "Failed to dup the fd";
        // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct
        // error to return here?
        return {ANEURALNETWORKS_UNEXPECTED_NULL, nullptr};
    }

    // Create a temporary native handle to own the dupfd.
    native_handle_t* nativeHandle = native_handle_create(1, 3);
    if (nativeHandle == nullptr) {
        LOG(ERROR) << "Failed to create native_handle";
        // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct
        // error to return here?
        return {ANEURALNETWORKS_UNEXPECTED_NULL, nullptr};
    }
    nativeHandle->data[0] = dupfd;
    nativeHandle->data[1] = prot;
    const uint64_t bits = static_cast<uint64_t>(offset);
    nativeHandle->data[2] = (int32_t)(uint32_t)(bits & 0xffffffff);
    nativeHandle->data[3] = (int32_t)(uint32_t)(bits >> 32);

    // Create a hidl_handle which owns the native handle and fd so that we don't
    // have to manually clean either the native handle or the fd.
    hardware::hidl_handle hidlHandle;
    hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);

    // Push the hidl_handle into a hidl_memory object. The hidl_memory object is
    // responsible for cleaning the hidl_handle, the native handle, and the fd.
    hidl_memory hidlMemory = hidl_memory("mmap_fd", std::move(hidlHandle), size);

    return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFd>(std::move(hidlMemory))};
}

MemoryFd::MemoryFd(hidl_memory memory) : Memory(std::move(memory)) {}

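// Wraps an AHardwareBuffer. A BLOB-format buffer behaves like a sized byte pool
// ("hardware_buffer_blob", with width as the size in bytes); any other format may
// only be used whole, as a request input or output, never as a model constant.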
std::pair<int, std::unique_ptr<MemoryAHWB>> MemoryAHWB::create(const AHardwareBuffer& ahwb) {
    AHardwareBuffer_Desc bufferDesc;
    AHardwareBuffer_describe(&ahwb, &bufferDesc);
    const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
    hidl_memory hidlMemory;
    std::unique_ptr<MemoryAHWB> memory;
    std::unique_ptr<MemoryValidatorBase> validator;
    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
        hidlMemory = hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
        validator = std::make_unique<SizedMemoryValidator>(bufferDesc.width);
    } else {
        // The memory size field is not used for non-BLOB format AHardwareBuffers.
        hidlMemory = hidl_memory("hardware_buffer", handle, 0);
        validator = std::make_unique<AHardwareBufferNonBlobValidator>();
    }
    memory = std::make_unique<MemoryAHWB>(std::move(hidlMemory), std::move(validator));
    return {ANEURALNETWORKS_NO_ERROR, std::move(memory)};
}

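// Wraps driver-managed memory: an IBuffer returned by the device's allocate call,
// identified in requests by a positive token.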
std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<hal::IBuffer> buffer,
                                                                           int32_t token) {
    if (buffer == nullptr) {
        LOG(ERROR) << "nullptr IBuffer for device memory.";
        return {ANEURALNETWORKS_BAD_DATA, nullptr};
    }
    if (token <= 0) {
        LOG(ERROR) << "Invalid token for device memory: " << token;
        return {ANEURALNETWORKS_BAD_DATA, nullptr};
    }
    return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)};
}

MemoryFromDevice::MemoryFromDevice(sp<hal::IBuffer> buffer, int32_t token)
    : Memory(std::move(buffer), token) {}

}  // namespace nn
}  // namespace android