Memory Domain Runtime: Device memory as execution I/O.

- Add validation checks to the NNAPI runtime for driver-allocated
  memory used as execution I/O (see the sketch below).
- Add validation tests.
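
A minimal sketch of the client flow covered by the new checks, assuming
an already-finished compilation whose input 0 matches the memory role
and an execution created from it (all names and indices below are
placeholders):

    #include <android/NeuralNetworks.h>

    void bindDeviceMemoryAsInput(ANeuralNetworksCompilation* compilation,
                                 ANeuralNetworksExecution* execution) {
        // Describe the memory: used as input 0 of `compilation`.
        ANeuralNetworksMemoryDesc* desc = nullptr;
        ANeuralNetworksMemoryDesc_create(&desc);
        ANeuralNetworksMemoryDesc_addInputRole(desc, compilation, /*index=*/0,
                                               /*frequency=*/1.0f);
        ANeuralNetworksMemoryDesc_finish(desc);

        // May be driver-allocated; the runtime falls back to ashmem if needed.
        ANeuralNetworksMemory* memory = nullptr;
        ANeuralNetworksMemory_createFromDesc(desc, &memory);
        ANeuralNetworksMemoryDesc_free(desc);

        // Driver-allocated memory must be bound with offset == 0 and
        // length == 0; other values now fail with ANEURALNETWORKS_BAD_DATA.
        ANeuralNetworksExecution_setInputFromMemory(execution, 0, /*type=*/nullptr,
                                                    memory, /*offset=*/0, /*length=*/0);
        // Running the execution and freeing the memory are elided.
    }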

Additionally, change the result code for passing a non-BLOB mode AHWB
to ANNModel_setOperandValueFromMemory from UNMAPPABLE to BAD_DATA.
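
A hedged illustration of the result-code change, assuming an unfinished
model whose operand 0 takes a constant and an arbitrary non-BLOB buffer
format (all names and sizes below are placeholders):

    #include <android/NeuralNetworks.h>
    #include <android/hardware_buffer.h>

    void checkNonBlobAhwbRejected(ANeuralNetworksModel* model) {
        AHardwareBuffer_Desc ahwbDesc = {
                .width = 2,
                .height = 2,
                .layers = 1,
                .format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,  // non-BLOB format
                .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY,
        };
        AHardwareBuffer* ahwb = nullptr;
        AHardwareBuffer_allocate(&ahwbDesc, &ahwb);

        ANeuralNetworksMemory* memory = nullptr;
        ANeuralNetworksMemory_createFromAHardwareBuffer(ahwb, &memory);

        // Previously returned ANEURALNETWORKS_UNMAPPABLE; with this change
        // the call is rejected with ANEURALNETWORKS_BAD_DATA.
        int result = ANeuralNetworksModel_setOperandValueFromMemory(
                model, /*index=*/0, memory, /*offset=*/0, /*length=*/sizeof(uint8_t));
        (void)result;

        ANeuralNetworksMemory_free(memory);
        AHardwareBuffer_release(ahwb);
    }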

Bug: 141353602
Bug: 141363565
Test: NNT_static
Change-Id: I6f253a0a90a0c1b2baa186034482215435f3c831
Merged-In: I6f253a0a90a0c1b2baa186034482215435f3c831
(cherry picked from commit 3c0d4fc46c1d66ae5489885ed44beb43c07130ff)
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index 2c14a07..6c18e1d 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -139,13 +139,17 @@
                             "ANeuralNetworksExecution_setInputFromMemory", false)) {
         return ANEURALNETWORKS_BAD_DATA;
     }
-    // Both offset & length must be zero for Non-BLOB format AHardwareBuffer.
-    if (memory->getHidlMemory().name() == "hardware_buffer" && (offset != 0 || length != 0)) {
-        LOG(ERROR) << "ANeuralNetworksExecution_setInputFromMemory has non-zero offset and length"
-                   << " for Non-BLOB format AHardwareBuffer.";
+    if (!memory->getValidator().validate(mCompilation, IOType::INPUT, index, type, offset,
+                                         length)) {
         return ANEURALNETWORKS_BAD_DATA;
-    } else if (!memory->validateSize(offset, length)) {
-        return ANEURALNETWORKS_BAD_DATA;
+    }
+    // For some types of memory, e.g. MemoryAshmem allocated from ANNMemory_createFromDesc, we
+    // allow the client to specify offset == 0 && length == 0 indicating that the entire memory
+    // region is used. We update the length here because the drivers are still expecting a real
+    // length. For other memories that do not allow this semantic, it is checked in
+    // MemoryValidatorBase::validate before reaching here.
+    if (memory->getHidlMemory().valid() && offset == 0 && length == 0) {
+        length = memory->getHidlMemory().size();
     }
     // TODO validate the rest
     uint32_t poolIndex = mMemories.add(memory);
@@ -196,13 +200,17 @@
                             "ANeuralNetworksExecution_setOutputFromMemory", true)) {
         return ANEURALNETWORKS_BAD_DATA;
     }
-    // Both offset & length must be zero for Non-BLOB format AHardwareBuffer.
-    if (memory->getHidlMemory().name() == "hardware_buffer" && (offset != 0 || length != 0)) {
-        LOG(ERROR) << "ANeuralNetworksExecution_setOutputFromMemory has non-zero offset and length"
-                   << " for Non-BLOB format AHardwareBuffer.";
+    if (!memory->getValidator().validate(mCompilation, IOType::OUTPUT, index, type, offset,
+                                         length)) {
         return ANEURALNETWORKS_BAD_DATA;
-    } else if (!memory->validateSize(offset, length)) {
-        return ANEURALNETWORKS_BAD_DATA;
+    }
+    // For some types of memory, e.g. MemoryAshmem allocated from ANNMemory_createFromDesc, we
+    // allow the client to specify offset == 0 && length == 0 indicating that the entire memory
+    // region is used. We update the length here because the drivers are still expecting a real
+    // length. For other memories that do not allow this semantic, it is checked in
+    // MemoryValidatorBase::validate before reaching here.
+    if (memory->getHidlMemory().valid() && offset == 0 && length == 0) {
+        length = memory->getHidlMemory().size();
     }
     // TODO validate the rest
     uint32_t poolIndex = mMemories.add(memory);
@@ -490,6 +498,11 @@
         if (p.state == ModelArgumentInfo::UNSPECIFIED) {
             LOG(ERROR) << "ANeuralNetworksExecution_" << name() << " not all inputs specified";
             return ANEURALNETWORKS_BAD_DATA;
+        } else if (p.state == ModelArgumentInfo::MEMORY) {
+            const Memory* memory = mMemories[p.locationAndLength.poolIndex];
+            if (!memory->getValidator().validateInputDimensions(p.dimensions)) {
+                return ANEURALNETWORKS_OP_FAILED;
+            }
         }
     }
     for (auto& p : mOutputs) {
@@ -586,13 +599,29 @@
     return true;
 }
 
-ErrorStatus ExecutionBuilder::finish(ErrorStatus, const std::vector<OutputShape>& outputShapes) {
+bool ExecutionBuilder::updateMemories() {
+    for (const auto& output : mOutputs) {
+        if (output.state != ModelArgumentInfo::MEMORY) continue;
+        const Memory* memory = mMemories[output.locationAndLength.poolIndex];
+        NN_RET_CHECK(memory->getValidator().updateDimensions(output.dimensions));
+    }
+    return true;
+}
+
+ErrorStatus ExecutionBuilder::finish(ErrorStatus status,
+                                     const std::vector<OutputShape>& outputShapes) {
     CHECK(!mFinished) << "ExecutionBuilder::finish is called twice";
     mFinished = true;
-    if (!updateOutputShapes(outputShapes)) {
-        return ErrorStatus::GENERAL_FAILURE;
+    if (!updateOutputShapes(outputShapes) || !updateMemories()) {
+        status = ErrorStatus::GENERAL_FAILURE;
     }
-    return ErrorStatus::NONE;
+    bool success = status == ErrorStatus::NONE;
+    for (const auto& output : mOutputs) {
+        if (output.state != ModelArgumentInfo::MEMORY) continue;
+        const Memory* memory = mMemories[output.locationAndLength.poolIndex];
+        memory->getValidator().setInitialized(success);
+    }
+    return status;
 }
 
 bool StepExecutor::updateOutputShapes(const std::vector<OutputShape>& from,
diff --git a/runtime/ExecutionBuilder.h b/runtime/ExecutionBuilder.h
index ca7a089..65f08d2 100644
--- a/runtime/ExecutionBuilder.h
+++ b/runtime/ExecutionBuilder.h
@@ -19,6 +19,7 @@
 
 #include <atomic>
 #include <memory>
+#include <tuple>
 #include <vector>
 
 #include "Callbacks.h"
@@ -102,6 +103,8 @@
     // Update output dimensional information from OutputShape to ModelArgumentInfo.
     bool updateOutputShapes(const std::vector<hal::OutputShape>& outputShapes);
 
+    bool updateMemories();
+
     const ModelBuilder* mModel;
     const ExecutionPlan* mPlan;
 
diff --git a/runtime/Memory.cpp b/runtime/Memory.cpp
index 058a31a..561df07 100644
--- a/runtime/Memory.cpp
+++ b/runtime/Memory.cpp
@@ -37,6 +37,124 @@
 
 using namespace hal;
 
+namespace {
+
+// The validator for a client-managed single-dimensional memory pool with a known size.
+// The memory may be used for request inputs, request outputs, or model constants.
+class SizedMemoryValidator : public MemoryValidatorBase {
+   public:
+    SizedMemoryValidator(uint32_t size) : kSize(size) {}
+
+    bool validate(const CompilationBuilder*, IOType, uint32_t, const ANeuralNetworksOperandType*,
+                  uint32_t offset, uint32_t length) const override {
+        NN_RET_CHECK(offset + length <= kSize) << "request size larger than the memory size.";
+        NN_RET_CHECK(offset != 0 || length != 0) << "memory size cannot be implied.";
+        return true;
+    }
+
+   private:
+    const uint32_t kSize;
+};
+
+// The validator for an AHardwareBuffer with Non-BLOB format.
+// We require that the memory only be used for request inputs or request outputs,
+// with both offset and length set to zero.
+class AHardwareBufferNonBlobValidator : public MemoryValidatorBase {
+   public:
+    AHardwareBufferNonBlobValidator() = default;
+
+    bool validate(const CompilationBuilder* compilation, IOType, uint32_t,
+                  const ANeuralNetworksOperandType*, uint32_t offset,
+                  uint32_t length) const override {
+        NN_RET_CHECK(compilation != nullptr)
+                << "cannot use Non-BLOB AHardwareBuffer as model constant";
+        NN_RET_CHECK(offset == 0 && length == 0)
+                << "non-zero offset (" << offset << ") and/or length (" << length
+                << ") for Non-BLOB format AHardwareBuffer.";
+        return true;
+    }
+};
+
+// The validator for a memory created from ANNMemory_createFromDesc.
+// We require that the memory only be used as one of the pre-specified roles,
+// with both offset and length set to zero.
+class DeviceMemoryValidator : public MemoryValidatorBase {
+   public:
+    DeviceMemoryValidator(std::set<CompilationRole> roles, hal::OperandType type,
+                          std::vector<uint32_t> dimensions)
+        : kCompilationRoles(std::move(roles)),
+          mDataType(type),
+          kInitialDimensions(std::move(dimensions)),
+          mUpdatedDimensions(kInitialDimensions) {}
+
+    bool validate(const CompilationBuilder* compilation, IOType ioType, uint32_t index,
+                  const ANeuralNetworksOperandType* type, uint32_t offset,
+                  uint32_t length) const override {
+        NN_RET_CHECK(kCompilationRoles.count({compilation, ioType, index}) > 0)
+                << "invalid compilation role.";
+        NN_RET_CHECK(offset == 0 && length == 0)
+                << "non-zero offset and/or length for driver-allocated memory.";
+        if (type) {
+            const bool isTensor = TypeManager::get()->isTensorType(mDataType);
+            NN_RET_CHECK(isTensor || type->dimensionCount == 0)
+                    << "invalid dimensions for scalar memory.";
+            std::vector<uint32_t> dimensions(type->dimensions,
+                                             type->dimensions + type->dimensionCount);
+            // We only check against kInitialDimensions here.
+            // For input memories, mUpdatedDimensions will be checked in validateInputDimensions
+            // at the beginning of a computation.
+            const auto combined = combineDimensions(dimensions, kInitialDimensions);
+            NN_RET_CHECK(combined.has_value())
+                    << "incompatible dimensions between request and memory. (request: "
+                    << toString(dimensions) << ", memory: " << toString(kInitialDimensions) << ")";
+        }
+        return true;
+    }
+
+    bool validateInputDimensions(const std::vector<uint32_t>& dimensions) const override {
+        NN_RET_CHECK(mInitialized) << "using an uninitialized memory as input";
+        NN_RET_CHECK(dimensions == mUpdatedDimensions)
+                << "incompatible input dimensions between request and memory. (request: "
+                << toString(dimensions) << ", memory: " << toString(mUpdatedDimensions) << ")";
+        return true;
+    }
+
+    bool updateDimensions(const std::vector<uint32_t>& dimensions) override {
+        NN_RET_CHECK(TypeManager::get()->isTensorType(mDataType) || dimensions.empty());
+        auto combined = combineDimensions(dimensions, kInitialDimensions);
+        NN_RET_CHECK(combined.has_value());
+        mUpdatedDimensions = std::move(combined.value());
+        return true;
+    }
+
+    void setInitialized(bool initialized) override { mInitialized = initialized; }
+
+   private:
+    const std::set<CompilationRole> kCompilationRoles;
+    OperandType mDataType;
+
+    // The dimensions of the memory when the memory object is created.
+    // May have unknown dimensions or rank.
+    const std::vector<uint32_t> kInitialDimensions;
+
+    // The updated dimensions after a successful execution or memory copying.
+    std::vector<uint32_t> mUpdatedDimensions;
+
+    bool mInitialized = false;
+};
+
+}  // namespace
+
+Memory::Memory(hal::hidl_memory memory)
+    : kHidlMemory(std::move(memory)),
+      mValidator(std::make_unique<SizedMemoryValidator>(kHidlMemory.size())) {}
+
+Memory::Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+    : kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {}
+
+Memory::Memory(sp<hal::IBuffer> buffer, int32_t token)
+    : kBuffer(std::move(buffer)), kToken(token) {}
+
 Memory::~Memory() {
     for (const auto [ptr, weakBurst] : mUsedBy) {
         if (const std::shared_ptr<ExecutionBurstController> burst = weakBurst.lock()) {
@@ -55,14 +173,6 @@
     return pool;
 }
 
-bool Memory::validateSize(uint32_t offset, uint32_t length) const {
-    if (offset + length > kHidlMemory.size()) {
-        LOG(ERROR) << "Request size larger than the memory size.";
-        return false;
-    }
-    return true;
-}
-
 intptr_t Memory::getKey() const {
     return reinterpret_cast<intptr_t>(this);
 }
@@ -261,6 +371,13 @@
         VLOG(MEMORY) << "MemoryBuilder::allocate -- fallback to ashmem.";
         std::tie(n, memory) = MemoryAshmem::create(size);
     }
+
+    if (n == ANEURALNETWORKS_NO_ERROR) {
+        CHECK(memory != nullptr);
+        auto validator =
+                std::make_unique<DeviceMemoryValidator>(mRoles, mOperand->type, mDesc.dimensions);
+        memory->setValidator(std::move(validator));
+    }
     return {n, std::move(memory)};
 }
 
@@ -331,31 +448,20 @@
     AHardwareBuffer_describe(&ahwb, &bufferDesc);
     const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
     hidl_memory hidlMemory;
+    std::unique_ptr<MemoryAHWB> memory;
+    std::unique_ptr<MemoryValidatorBase> validator;
     if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
         hidlMemory = hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
+        validator = std::make_unique<SizedMemoryValidator>(bufferDesc.width);
     } else {
         // memory size is not used.
         hidlMemory = hidl_memory("hardware_buffer", handle, 0);
+        validator = std::make_unique<AHardwareBufferNonBlobValidator>();
     }
-
-    std::unique_ptr<MemoryAHWB> memory =
-            std::make_unique<MemoryAHWB>(bufferDesc, std::move(hidlMemory));
+    memory = std::make_unique<MemoryAHWB>(std::move(hidlMemory), std::move(validator));
     return {ANEURALNETWORKS_NO_ERROR, std::move(memory)};
 };
 
-bool MemoryAHWB::validateSize(uint32_t offset, uint32_t length) const {
-    // validateSize should only be called on BLOB mode buffer.
-    if (!kBlobMode) {
-        LOG(ERROR) << "Invalid AHARDWAREBUFFER_FORMAT, must be AHARDWAREBUFFER_FORMAT_BLOB.";
-        return false;
-    }
-    // Use normal validation.
-    return Memory::validateSize(offset, length);
-}
-
-MemoryAHWB::MemoryAHWB(const AHardwareBuffer_Desc& desc, hidl_memory memory)
-    : Memory(std::move(memory)), kBlobMode(desc.format == AHARDWAREBUFFER_FORMAT_BLOB) {}
-
 std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<hal::IBuffer> buffer,
                                                                            int32_t token) {
     if (buffer == nullptr) {
diff --git a/runtime/Memory.h b/runtime/Memory.h
index 70ec4c2..5394338 100644
--- a/runtime/Memory.h
+++ b/runtime/Memory.h
@@ -102,6 +102,42 @@
     std::vector<hal::BufferRole> inputRoles, outputRoles;
 };
 
+class MemoryValidatorBase {
+    DISALLOW_COPY_AND_ASSIGN(MemoryValidatorBase);
+
+   public:
+    MemoryValidatorBase() = default;
+    virtual ~MemoryValidatorBase() = default;
+
+    // Validate the memory usage and size information when passed in
+    // ANeuralNetworks{Model,Compilation}_set*FromMemory.
+    //
+    // This method only validates the arguments against the memory. It does not validate the
+    // correctness of the arguments themselves. E.g. it does not validate if the index is out of
+    // range.
+    //
+    // Usages:
+    //   - ANeuralNetworksModel_setOperandValueFromMemory:
+    //         validate(nullptr, IOType::INPUT, operandIndex, nullptr, offset, length)
+    //
+    //   - ANeuralNetworksExecution_setInputFromMemory:
+    //         validate(compilation, IOType::INPUT, inputIndex, type, offset, length)
+    //
+    //   - ANeuralNetworksExecution_setOutputFromMemory:
+    //         validate(compilation, IOType::OUTPUT, outputIndex, type, offset, length)
+    //
+    virtual bool validate(const CompilationBuilder* compilation, IOType ioType, uint32_t index,
+                          const ANeuralNetworksOperandType* type, uint32_t offset,
+                          uint32_t length) const = 0;
+
+    // Validate the memory dimensional information at the beginning of a computation.
+    virtual bool validateInputDimensions(const std::vector<uint32_t>&) const { return true; }
+
+    virtual bool updateDimensions(const std::vector<uint32_t>&) { return true; }
+
+    virtual void setInitialized(bool) {}
+};
+
 // Represents a memory region.
 class Memory {
     // Disallow copy and assign to prevent slicing
@@ -116,7 +152,14 @@
     const hal::hidl_memory& getHidlMemory() const { return kHidlMemory; }
     const sp<hal::IBuffer>& getIBuffer() const { return kBuffer; }
 
-    virtual bool validateSize(uint32_t offset, uint32_t length) const;
+    MemoryValidatorBase& getValidator() const {
+        CHECK(mValidator != nullptr);
+        return *mValidator;
+    }
+
+    void setValidator(std::unique_ptr<MemoryValidatorBase> validator) {
+        mValidator = std::move(validator);
+    }
 
     // Unique key representing this memory object.
     intptr_t getKey() const;
@@ -127,8 +170,9 @@
     void usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const;
 
    protected:
-    Memory(hal::hidl_memory memory) : kHidlMemory(std::move(memory)) {}
-    Memory(sp<hal::IBuffer> buffer, int32_t token) : kBuffer(std::move(buffer)), kToken(token) {}
+    Memory(hal::hidl_memory memory);
+    Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
+    Memory(sp<hal::IBuffer> buffer, int32_t token);
 
     // The HIDL representation for this memory.  We will use one of the following values
     // when communicating with the drivers.
@@ -136,6 +180,8 @@
     const sp<hal::IBuffer> kBuffer;
     const int32_t kToken = 0;
 
+    std::unique_ptr<MemoryValidatorBase> mValidator;
+
    private:
     mutable std::mutex mMutex;
     // mUsedBy is essentially a set of burst objects which use this Memory
@@ -230,16 +276,10 @@
     // On error, returns the appropriate NNAPI error code and nullptr.
     static std::pair<int, std::unique_ptr<MemoryAHWB>> create(const AHardwareBuffer& ahwb);
 
-    // validateSize should only be called for blob mode AHardwareBuffer.
-    // Calling it on non-blob mode AHardwareBuffer will result in an error.
     // TODO(miaowang): consider separate blob and non-blob into different classes.
-    bool validateSize(uint32_t offset, uint32_t length) const override;
-
     // prefer using MemoryAHWB::create
-    MemoryAHWB(const AHardwareBuffer_Desc& desc, hal::hidl_memory memory);
-
-   private:
-    const bool kBlobMode;
+    MemoryAHWB(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+        : Memory(std::move(memory), std::move(validator)) {}
 };
 
 class MemoryFromDevice : public Memory {
diff --git a/runtime/ModelArgumentInfo.cpp b/runtime/ModelArgumentInfo.cpp
index e274021..f8ddbfe 100644
--- a/runtime/ModelArgumentInfo.cpp
+++ b/runtime/ModelArgumentInfo.cpp
@@ -62,11 +62,12 @@
 int ModelArgumentInfo::setFromMemory(const Operand& operand, const ANeuralNetworksOperandType* type,
                                      uint32_t poolIndex, uint32_t offset, uint32_t length) {
     NN_RETURN_IF_ERROR(updateDimensionInfo(operand, type));
-    if (operand.type != OperandType::OEM) {
-        uint32_t neededLength = TypeManager::get()->getSizeOfData(operand.type, dimensions);
+    const bool isMemorySizeKnown = offset != 0 || length != 0;
+    if (isMemorySizeKnown && operand.type != OperandType::OEM) {
+        const uint32_t neededLength = TypeManager::get()->getSizeOfData(operand.type, dimensions);
         if (neededLength != length && neededLength != 0) {
             LOG(ERROR) << "Setting argument with invalid length: " << length
-                       << ", expected length: " << neededLength;
+                       << " (offset: " << offset << "), expected length: " << neededLength;
             return ANEURALNETWORKS_BAD_DATA;
         }
     }
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
index 302e618..ae70b7c 100644
--- a/runtime/ModelBuilder.cpp
+++ b/runtime/ModelBuilder.cpp
@@ -298,19 +298,16 @@
                    << " which has operand type that is not fully specified";
         return ANEURALNETWORKS_BAD_DATA;
     }
-    // Only BLOB format AHardwareBuffer can be used for constant data.
-    if (memory->getHidlMemory().name() == "hardware_buffer") {
-        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory passed an AHardwareBuffer"
-                   << " that is not in AHARDWAREBUFFER_FORMAT_BLOB format";
-        return ANEURALNETWORKS_UNMAPPABLE;
-    }
     uint32_t neededLength = TypeManager::get()->getSizeOfData(operand);
     if (neededLength != length) {
         LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory setting " << length
                    << " bytes when needing " << neededLength;
         return ANEURALNETWORKS_BAD_DATA;
     }
-    if (!memory->validateSize(offset, length)) {
+    // Set compilation = nullptr to indicate that the memory is used for a model constant.
+    // In this case, IOType::INPUT is a dummy value that is ignored by the validator.
+    if (!memory->getValidator().validate(/*compilation=*/nullptr, /*dummy*/ IOType::INPUT, index,
+                                         nullptr, offset, length)) {
         return ANEURALNETWORKS_BAD_DATA;
     }
     operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index 59ef77e..45c80b5 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -231,6 +231,134 @@
     ANeuralNetworksMemoryDesc* mDesc = nullptr;
 };
 
+class ValidationTestExecutionDeviceMemory : public ValidationTest {
+   protected:
+    virtual void SetUp() {
+        ValidationTest::SetUp();
+        ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR);
+        createModel(mModel, /*dimensionsUnspecified=*/false, /*isValid=*/true);
+        ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &mCompilation),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
+                  ANEURALNETWORKS_NO_ERROR);
+
+        ASSERT_EQ(ANeuralNetworksModel_create(&mModelDynamic), ANEURALNETWORKS_NO_ERROR);
+        createModel(mModelDynamic, /*dimensionsUnspecified=*/true, /*isValid=*/true);
+        ASSERT_EQ(ANeuralNetworksCompilation_create(mModelDynamic, &mCompilationDynamic),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilationDynamic), ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilationDynamic, &mExecutionDynamic),
+                  ANEURALNETWORKS_NO_ERROR);
+
+        ASSERT_EQ(ANeuralNetworksModel_create(&mInitModel), ANEURALNETWORKS_NO_ERROR);
+        createModel(mInitModel, /*dimensionsUnspecified=*/false, /*isValid=*/true);
+        ASSERT_EQ(ANeuralNetworksCompilation_create(mInitModel, &mInitCompilation),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksCompilation_finish(mInitCompilation), ANEURALNETWORKS_NO_ERROR);
+
+        ASSERT_EQ(ANeuralNetworksModel_create(&mDeinitModel), ANEURALNETWORKS_NO_ERROR);
+        createModel(mDeinitModel, /*dimensionsUnspecified=*/false, /*isValid=*/false);
+        ASSERT_EQ(ANeuralNetworksCompilation_create(mDeinitModel, &mDeinitCompilation),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksCompilation_finish(mDeinitCompilation), ANEURALNETWORKS_NO_ERROR);
+    }
+    virtual void TearDown() {
+        ANeuralNetworksExecution_free(mExecution);
+        ANeuralNetworksCompilation_free(mCompilation);
+        ANeuralNetworksModel_free(mModel);
+        ANeuralNetworksExecution_free(mExecutionDynamic);
+        ANeuralNetworksCompilation_free(mCompilationDynamic);
+        ANeuralNetworksModel_free(mModelDynamic);
+
+        ANeuralNetworksCompilation_free(mInitCompilation);
+        ANeuralNetworksModel_free(mInitModel);
+        ANeuralNetworksCompilation_free(mDeinitCompilation);
+        ANeuralNetworksModel_free(mDeinitModel);
+
+        ValidationTest::TearDown();
+    }
+
+    void addScalarOperand(ANeuralNetworksModel* model) {
+        ANeuralNetworksOperandType operandType = {
+                .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
+        EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &operandType), ANEURALNETWORKS_NO_ERROR);
+    }
+
+    void addTensorOperand(ANeuralNetworksModel* model, bool dimensionsUnspecified) {
+        uint32_t dimension = dimensionsUnspecified ? 0 : 1;
+        ANeuralNetworksOperandType operandType = {
+                .type = ANEURALNETWORKS_TENSOR_FLOAT32,
+                .dimensionCount = 1,
+                .dimensions = &dimension,
+        };
+        EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &operandType), ANEURALNETWORKS_NO_ERROR);
+    }
+
+    void createModel(ANeuralNetworksModel* model, bool dimensionsUnspecified, bool isValid) {
+        const float constData = 0;
+        const uint32_t actData = isValid ? 0 : 999;
+
+        addTensorOperand(model, dimensionsUnspecified);
+        addTensorOperand(model, /*dimensionsUnspecified=*/false);
+        addScalarOperand(model);
+        addTensorOperand(model, dimensionsUnspecified);
+
+        ANeuralNetworksModel_setOperandValue(model, 1, &constData, sizeof(float));
+        ANeuralNetworksModel_setOperandValue(model, 2, &actData, sizeof(uint32_t));
+
+        uint32_t inList[] = {0, 1, 2}, outList[] = {3};
+        ASSERT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, inList, 1,
+                                                    outList),
+                  ANEURALNETWORKS_NO_ERROR);
+        uint32_t inputList[] = {0}, outputList[] = {3};
+        ASSERT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, inputList, 1, outputList),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
+    }
+
+    void executeWithMemoryAsInput(ANeuralNetworksCompilation* compilation,
+                                  ANeuralNetworksMemory* memory, int expectedResult) {
+        float data = 0;
+        ANeuralNetworksExecution* execution = nullptr;
+        ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, 0),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &data, sizeof(float)),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_compute(execution), expectedResult);
+        ANeuralNetworksExecution_free(execution);
+    }
+
+    void executeWithMemoryAsOutput(ANeuralNetworksCompilation* compilation,
+                                   ANeuralNetworksMemory* memory, int expectedResult) {
+        const float data = 0;
+        ANeuralNetworksExecution* execution = nullptr;
+        ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &data, sizeof(float)),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 0),
+                  ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_compute(execution), expectedResult);
+        ANeuralNetworksExecution_free(execution);
+    }
+
+    ANeuralNetworksModel* mModel = nullptr;
+    ANeuralNetworksCompilation* mCompilation = nullptr;
+    ANeuralNetworksExecution* mExecution = nullptr;
+
+    ANeuralNetworksModel* mModelDynamic = nullptr;
+    ANeuralNetworksCompilation* mCompilationDynamic = nullptr;
+    ANeuralNetworksExecution* mExecutionDynamic = nullptr;
+
+    ANeuralNetworksModel* mInitModel = nullptr;
+    ANeuralNetworksCompilation* mInitCompilation = nullptr;
+    ANeuralNetworksModel* mDeinitModel = nullptr;
+    ANeuralNetworksCompilation* mDeinitCompilation = nullptr;
+};
+
 TEST_F(ValidationTest, CreateModel) {
     EXPECT_EQ(ANeuralNetworksModel_create(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
 }
@@ -506,7 +634,7 @@
 
     // This should fail, since non-BLOB AHardwareBuffer is not allowed.
     EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, sizeof(uint8_t)),
-              ANEURALNETWORKS_UNMAPPABLE);
+              ANEURALNETWORKS_BAD_DATA);
 
     AHardwareBuffer_release(buffer);
 }
@@ -1172,6 +1300,141 @@
     AHardwareBuffer_release(buffer);
 }
 
+TEST_F(ValidationTestExecutionDeviceMemory, SetInputFromMemory) {
+    ANeuralNetworksMemoryDesc* desc;
+    ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, mCompilation, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+
+    // The following output roles are for init/deinit of the device memory.
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mInitCompilation, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mDeinitCompilation, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
+
+    ANeuralNetworksMemory* memory;
+    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksMemoryDesc_free(desc);
+
+    // Uninitialized memory as input.
+    // TODO(xusongw): Additionally validate the case when the state of the memory is changed
+    //                between setInputFromMemory and compute.
+    executeWithMemoryAsInput(mCompilation, memory, ANEURALNETWORKS_OP_FAILED);
+
+    // Initialize device memory.
+    executeWithMemoryAsOutput(mInitCompilation, memory, ANEURALNETWORKS_NO_ERROR);
+
+    // Bad offset and length.
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 1, 0),
+              ANEURALNETWORKS_BAD_DATA);
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0,
+                                                          sizeof(float)),
+              ANEURALNETWORKS_BAD_DATA);
+
+    // Bad usage -- not configured for this role.
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0, 0),
+              ANEURALNETWORKS_BAD_DATA);
+
+    // Deinitialize device memory.
+    executeWithMemoryAsOutput(mDeinitCompilation, memory, ANEURALNETWORKS_OP_FAILED);
+
+    // Uninitialized memory as input.
+    executeWithMemoryAsInput(mCompilation, memory, ANEURALNETWORKS_OP_FAILED);
+
+    ANeuralNetworksMemory_free(memory);
+}
+
+TEST_F(ValidationTestExecutionDeviceMemory, SetOutputFromMemory) {
+    ANeuralNetworksMemoryDesc* desc;
+    ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mCompilation, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
+
+    ANeuralNetworksMemory* memory;
+    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksMemoryDesc_free(desc);
+
+    // Bad offset and length.
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 1, 0),
+              ANEURALNETWORKS_BAD_DATA);
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0,
+                                                           sizeof(float)),
+              ANEURALNETWORKS_BAD_DATA);
+
+    // Bad usage -- not configured for this role.
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, 0),
+              ANEURALNETWORKS_BAD_DATA);
+
+    ANeuralNetworksMemory_free(memory);
+}
+
+TEST_F(ValidationTestExecutionDeviceMemory, SetInputFromMemory_DynamicShape) {
+    uint32_t dimension = 1, badDimension = 2;
+    ANeuralNetworksOperandType badType = {
+            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
+            .dimensionCount = 1,
+            .dimensions = &badDimension,
+    };
+
+    ANeuralNetworksMemoryDesc* desc;
+    ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, mCompilationDynamic, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(desc, 1, &dimension),
+              ANEURALNETWORKS_NO_ERROR);
+
+    // The following output role is for init of the device memory.
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mInitCompilation, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
+
+    ANeuralNetworksMemory* memory;
+    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksMemoryDesc_free(desc);
+
+    // Initialize device memory.
+    executeWithMemoryAsOutput(mInitCompilation, memory, ANEURALNETWORKS_NO_ERROR);
+
+    // Incompatible dimensions between updated type and memory.
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecutionDynamic, 0, &badType, memory, 0,
+                                                          0),
+              ANEURALNETWORKS_BAD_DATA);
+
+    ANeuralNetworksMemory_free(memory);
+}
+
+TEST_F(ValidationTestExecutionDeviceMemory, SetOutputFromMemory_DynamicShape) {
+    uint32_t dimension = 1, badDimension = 2;
+    ANeuralNetworksOperandType badType = {
+            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
+            .dimensionCount = 1,
+            .dimensions = &badDimension,
+    };
+
+    ANeuralNetworksMemoryDesc* desc;
+    ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mCompilationDynamic, 0, 1.0f),
+              ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(desc, 1, &dimension),
+              ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
+
+    ANeuralNetworksMemory* memory;
+    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
+    ANeuralNetworksMemoryDesc_free(desc);
+
+    // Incompatible dimensions between updated type and memory.
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecutionDynamic, 0, &badType, memory,
+                                                           0, 0),
+              ANEURALNETWORKS_BAD_DATA);
+
+    ANeuralNetworksMemory_free(memory);
+}
+
 TEST_F(ValidationTestExecution, Compute) {
     EXPECT_EQ(ANeuralNetworksExecution_compute(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
 }