Support memory domain in sample driver.

Bug: 147777318
Test: NNT_static
Test: 1.3 VTS
Change-Id: I64c2d325d27de36d422e86cd34d7311cededbf48
Merged-In: I64c2d325d27de36d422e86cd34d7311cededbf48
(cherry picked from commit c0622db536ba13cfcc64f0c5e9acea15672978c7)
diff --git a/common/Android.bp b/common/Android.bp
index e8355de..42bf39a 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -143,6 +143,7 @@
         "include",
     ],
     srcs: [
+        "BufferTracker.cpp",
         "CpuExecutor.cpp",
         "ExecutionBurstController.cpp",
         "ExecutionBurstServer.cpp",
diff --git a/common/BufferTracker.cpp b/common/BufferTracker.cpp
new file mode 100644
index 0000000..e6b8d94
--- /dev/null
+++ b/common/BufferTracker.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BufferTracker.h"
+
+#include <android-base/macros.h>
+
+#include <memory>
+#include <mutex>
+#include <set>
+#include <stack>
+#include <utility>
+#include <vector>
+
+#include "CpuExecutor.h"
+#include "HalInterfaces.h"
+#include "Utils.h"
+
+namespace android::nn {
+
+using namespace hal;
+
+// Creates a ManagedBuffer backed by a heap allocation of "size" bytes.
+// Returns nullptr if the operand is an extension type (validation helpers used
+// by this class only understand non-extension types) or if allocation fails.
+std::shared_ptr<ManagedBuffer> ManagedBuffer::create(uint32_t size,
+                                                     std::set<PreparedModelRole> roles,
+                                                     const Operand& operand) {
+    // Reject unsupported operand types before paying for the allocation.
+    if (isExtensionOperandType(operand.type)) {
+        LOG(ERROR) << "ManagedBuffer cannot handle extension operands.";
+        return nullptr;
+    }
+    std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]);
+    if (buffer == nullptr) {
+        LOG(ERROR) << "ManagedBuffer -- failed to allocate " << size << " bytes.";
+        return nullptr;
+    }
+    return std::make_shared<ManagedBuffer>(std::move(buffer), size, std::move(roles), operand);
+}
+
+// Direct constructor; prefer ManagedBuffer::create, which checks the operand
+// type and allocation result before construction.
+ManagedBuffer::ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
+                             std::set<PreparedModelRole> roles, const Operand& operand)
+    : kBuffer(std::move(buffer)),
+      kSize(size),
+      kRoles(std::move(roles)),
+      kOperandType(operand.type),
+      kInitialDimensions(operand.dimensions),
+      mUpdatedDimensions(operand.dimensions) {
+    // Guards direct construction: extension operand types are not supported.
+    CHECK(!isExtensionOperandType(kOperandType));
+}
+
+// Validates every input/output of "request" that refers to this buffer (the
+// memory pool at "poolIndex"): the (preparedModel, ioType, index) usage must
+// match a role declared at allocation time, and the dimensions must be
+// compatible. Returns ErrorStatus::NONE on success.
+ErrorStatus ManagedBuffer::validateRequest(uint32_t poolIndex, const Request& request,
+                                           const IPreparedModel* preparedModel) const {
+    CHECK_LT(poolIndex, request.pools.size());
+    CHECK(request.pools[poolIndex].getDiscriminator() ==
+          Request::MemoryPool::hidl_discriminator::token);
+    std::lock_guard<std::mutex> guard(mMutex);
+
+    bool usedAsInput = false, usedAsOutput = false;
+    for (uint32_t i = 0; i < request.inputs.size(); i++) {
+        if (request.inputs[i].hasNoValue) continue;
+        if (request.inputs[i].location.poolIndex != poolIndex) continue;
+        // Validate if the input role is specified during allocation.
+        if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
+            LOG(ERROR) << "ManagedBuffer::validateRequest -- invalid buffer role.";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        // A buffer that was never initialized may not be read as an input.
+        if (!mInitialized) {
+            LOG(ERROR) << "ManagedBuffer::validateRequest -- using uninitialized buffer as input "
+                          "request.";
+            return ErrorStatus::GENERAL_FAILURE;
+        }
+        // Inputs are validated against the current (updated) dimensions.
+        auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions);
+        if (!combined.has_value()) {
+            LOG(ERROR) << "ManagedBuffer::validateRequest -- incompatible dimensions ("
+                       << toString(mUpdatedDimensions) << " vs "
+                       << toString(request.inputs[i].dimensions) << ")";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        usedAsInput = true;
+    }
+    for (uint32_t i = 0; i < request.outputs.size(); i++) {
+        if (request.outputs[i].hasNoValue) continue;
+        if (request.outputs[i].location.poolIndex != poolIndex) continue;
+        // This buffer may back at most one output, and may not be both an
+        // input and an output within the same request.
+        if (usedAsInput || usedAsOutput) {
+            LOG(ERROR) << "ManagedBuffer::validateRequest -- using the same device memory for "
+                          "input/output or multiple outputs";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        // Validate if the output role is specified during allocation.
+        if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) {
+            LOG(ERROR) << "ManagedBuffer::validateRequest -- invalid buffer role.";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        // Outputs are validated against the initial dimensions, since an
+        // execution may overwrite the buffer with new dimensions.
+        auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions);
+        if (!combined.has_value()) {
+            LOG(ERROR) << "ManagedBuffer::validateRequest -- incompatible dimensions ("
+                       << toString(kInitialDimensions) << " vs "
+                       << toString(request.outputs[i].dimensions) << ")";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        usedAsOutput = true;
+    }
+    return ErrorStatus::NONE;
+}
+
+// Validates an IBuffer::copyFrom call against this buffer: "size" must equal
+// the buffer's byte size, and "dimensions" (an optional dimension update) must
+// be consistent with the operand -- empty for scalars, and for tensors either
+// fully specified or omitted when the initial dimensions are already fully
+// specified.
+ErrorStatus ManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions,
+                                            uint32_t size) const {
+    if (size != kSize) {
+        LOG(ERROR) << "ManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize << " vs "
+                   << size;
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+
+    // Scalar operands carry no dimensions, so any dimension update is invalid.
+    if (nonExtensionOperandTypeIsScalar(static_cast<int>(kOperandType))) {
+        if (!dimensions.empty()) {
+            LOG(ERROR)
+                    << "ManagedBuffer::validateCopyFrom -- invalid dimensions for scalar operand: "
+                    << toString(dimensions);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        return ErrorStatus::NONE;
+    }
+
+    if (dimensions.empty()) {
+        // No update provided: the initial dimensions must already be complete.
+        if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) {
+            LOG(ERROR) << "ManagedBuffer::validateCopyFrom -- the initial dimensions are not fully "
+                          "specified and no dimension update is provided: "
+                       << toString(kInitialDimensions);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+    } else {
+        // An update must itself be fully specified.
+        if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) {
+            LOG(ERROR) << "ManagedBuffer::validateCopyFrom -- the updated dimensions are not fully "
+                          "specified: "
+                       << toString(dimensions);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+    }
+
+    // Finally, the update must be compatible with the initial dimensions.
+    const auto combined = combineDimensions(kInitialDimensions, dimensions);
+    if (!combined.has_value()) {
+        LOG(ERROR) << "ManagedBuffer::validateCopyFrom -- incompatible dimensions ("
+                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+    return ErrorStatus::NONE;
+}
+
+// Validates an IBuffer::copyTo call: "size" must equal the buffer's byte size
+// and the buffer must have been initialized before it can be read from.
+ErrorStatus ManagedBuffer::validateCopyTo(uint32_t size) const {
+    if (size != kSize) {
+        LOG(ERROR) << "ManagedBuffer::validateCopyTo -- invalid memory size: " << kSize << " vs "
+                   << size;
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+    std::lock_guard<std::mutex> guard(mMutex);
+    if (!mInitialized) {
+        LOG(ERROR) << "ManagedBuffer::validateCopyTo -- using uninitialized buffer as source.";
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    return ErrorStatus::NONE;
+}
+
+// Merges "dimensions" with the initial dimensions and stores the result as the
+// current (updated) dimensions. Returns false (and logs) if they are
+// incompatible, leaving the stored dimensions unchanged.
+bool ManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) {
+    auto combined = combineDimensions(kInitialDimensions, dimensions);
+    if (!combined) {
+        LOG(ERROR) << "ManagedBuffer::updateDimensions -- incompatible dimensions ("
+                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
+        return false;
+    }
+    std::lock_guard<std::mutex> guard(mMutex);
+    mUpdatedDimensions = std::move(combined.value());
+    return true;
+}
+
+// Marks whether the buffer currently holds valid data. Guarded by mMutex,
+// consistent with the validate* methods that read mInitialized.
+void ManagedBuffer::setInitialized(bool initialized) {
+    std::lock_guard<std::mutex> guard(mMutex);
+    mInitialized = initialized;
+}
+
+// Registers "buffer" and returns an RAII Token whose destruction frees the
+// slot. Returns nullptr if "buffer" is null.
+std::unique_ptr<BufferTracker::Token> BufferTracker::add(std::shared_ptr<ManagedBuffer> buffer) {
+    if (buffer == nullptr) {
+        return nullptr;
+    }
+    std::lock_guard<std::mutex> guard(mMutex);
+    uint32_t token = 0;
+    if (mFreeTokens.empty()) {
+        // No recycled token available: append a new slot. mTokenToBuffers is
+        // constructed with one null entry, so token 0 is never handed out.
+        token = mTokenToBuffers.size();
+        mTokenToBuffers.push_back(std::move(buffer));
+    } else {
+        // Reuse a token returned by a previous free().
+        token = mFreeTokens.top();
+        mFreeTokens.pop();
+        mTokenToBuffers[token] = std::move(buffer);
+    }
+    VLOG(MEMORY) << "BufferTracker::add -- new token = " << token;
+    return std::make_unique<Token>(token, shared_from_this());
+}
+
+// Looks up the buffer registered under "token". Returns nullptr (and logs) if
+// the token is out of range or its slot has been freed.
+std::shared_ptr<ManagedBuffer> BufferTracker::get(uint32_t token) const {
+    std::lock_guard<std::mutex> guard(mMutex);
+    if (mTokenToBuffers.size() <= token || mTokenToBuffers[token] == nullptr) {
+        LOG(ERROR) << "BufferTracker::get -- unknown token " << token;
+        return nullptr;
+    }
+    return mTokenToBuffers[token];
+}
+
+// Releases the slot for "token" and recycles the token for later add() calls.
+// Invoked from Token's destructor; the token must refer to a live slot.
+void BufferTracker::free(uint32_t token) {
+    std::lock_guard<std::mutex> guard(mMutex);
+    CHECK_LT(token, mTokenToBuffers.size());
+    CHECK(mTokenToBuffers[token] != nullptr);
+    VLOG(MEMORY) << "BufferTracker::free -- release token = " << token;
+    mTokenToBuffers[token] = nullptr;
+    mFreeTokens.push(token);
+}
+
+}  // namespace android::nn
diff --git a/common/Utils.cpp b/common/Utils.cpp
index b5c55ec..7032dda 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -382,6 +382,11 @@
     return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
 }
 
+// Convenience overload of the (int, const uint32_t*, uint32_t) variant above,
+// taking a typed OperandType and a dimension vector.
+bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {
+    return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
+                                          dimensions.size());
+}
+
 bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
     return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
 }
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp
index d391ed2..2e1c235 100644
--- a/common/ValidateHal.cpp
+++ b/common/ValidateHal.cpp
@@ -22,6 +22,7 @@
 
 #include <algorithm>
 #include <set>
+#include <utility>
 #include <vector>
 
 #include "NeuralNetworks.h"
@@ -781,6 +782,85 @@
             validatePools(request.pools, HalVersion::V1_3));
 }
 
+// Validates the arguments of IDevice::allocate. On success, optionally
+// returns the flattened set of (preparedModel, ioType, index) roles via
+// "preparedModelRoles" and an operand whose dimensions are the combination of
+// desc.dimensions with all role operands via "combinedOperand" (either output
+// pointer may be null if not needed).
+bool validateMemoryDesc(const V1_3::BufferDesc& desc,
+                        const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+                        const hidl_vec<V1_3::BufferRole>& inputRoles,
+                        const hidl_vec<V1_3::BufferRole>& outputRoles,
+                        std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
+                        std::set<PreparedModelRole>* preparedModelRoles,
+                        V1_3::Operand* combinedOperand) {
+    NN_RET_CHECK(preparedModels.size() != 0);
+    NN_RET_CHECK(inputRoles.size() != 0 || outputRoles.size() != 0);
+
+    std::set<PreparedModelRole> roles;
+    std::vector<V1_3::Operand> operands;
+    operands.reserve(inputRoles.size() + outputRoles.size());
+    // Each input role must reference a valid prepared model and input index,
+    // carry a frequency in (0.0, 1.0], and be unique across all roles.
+    for (const auto& role : inputRoles) {
+        NN_RET_CHECK_LT(role.modelIndex, preparedModels.size());
+        const auto& preparedModel = preparedModels[role.modelIndex];
+        NN_RET_CHECK(preparedModel != nullptr);
+        const auto* model = getModel(preparedModel);
+        NN_RET_CHECK(model != nullptr);
+        const auto& inputIndexes = model->main.inputIndexes;
+        NN_RET_CHECK_LT(role.ioIndex, inputIndexes.size());
+        NN_RET_CHECK_GT(role.frequency, 0.0f);
+        NN_RET_CHECK_LE(role.frequency, 1.0f);
+        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);
+        NN_RET_CHECK(success);
+        operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]);
+    }
+    // Same checks for the output roles, against each model's output indexes.
+    for (const auto& role : outputRoles) {
+        NN_RET_CHECK_LT(role.modelIndex, preparedModels.size());
+        const auto& preparedModel = preparedModels[role.modelIndex];
+        NN_RET_CHECK(preparedModel != nullptr);
+        const auto* model = getModel(preparedModel);
+        NN_RET_CHECK(model != nullptr);
+        const auto& outputIndexes = model->main.outputIndexes;
+        NN_RET_CHECK_LT(role.ioIndex, outputIndexes.size());
+        NN_RET_CHECK_GT(role.frequency, 0.0f);
+        NN_RET_CHECK_LE(role.frequency, 1.0f);
+        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);
+        NN_RET_CHECK(success);
+        operands.push_back(model->main.operands[outputIndexes[role.ioIndex]]);
+    }
+
+    CHECK(!operands.empty());
+    const auto opType = operands[0].type;
+    const bool isExtension = isExtensionOperandType(opType);
+
+    // All role operands must agree on type, scale, zero point, and (for
+    // non-extension types) extra params; their dimensions are folded together
+    // with desc.dimensions into a single combined shape.
+    std::vector<uint32_t> dimensions = desc.dimensions;
+    for (const auto& operand : operands) {
+        NN_RET_CHECK(operand.type == operands[0].type)
+                << toString(operand.type) << " vs " << toString(operands[0].type);
+        NN_RET_CHECK_EQ(operand.scale, operands[0].scale);
+        NN_RET_CHECK_EQ(operand.zeroPoint, operands[0].zeroPoint);
+        // NOTE: validateMemoryDesc cannot validate extra parameters for extension operand type.
+        if (!isExtension) {
+            NN_RET_CHECK(operand.extraParams == operands[0].extraParams)
+                    << toString(operand.extraParams) << " vs " << toString(operands[0].extraParams);
+        }
+        const auto combined = combineDimensions(dimensions, operand.dimensions);
+        NN_RET_CHECK(combined.has_value());
+        dimensions = combined.value();
+    }
+
+    // NOTE: validateMemoryDesc cannot validate scalar dimensions with extension operand type.
+    if (!isExtension) {
+        NN_RET_CHECK(!nonExtensionOperandTypeIsScalar(static_cast<int>(opType)) ||
+                     dimensions.empty())
+                << "invalid dimensions with scalar operand type.";
+    }
+
+    // Emit the optional outputs.
+    if (preparedModelRoles != nullptr) {
+        *preparedModelRoles = std::move(roles);
+    }
+    if (combinedOperand != nullptr) {
+        *combinedOperand = operands[0];
+        combinedOperand->dimensions = dimensions;
+    }
+    return true;
+}
+
 bool validateExecutionPreference(ExecutionPreference preference) {
     return preference == ExecutionPreference::LOW_POWER ||
            preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
diff --git a/common/include/BufferTracker.h b/common/include/BufferTracker.h
new file mode 100644
index 0000000..feabda6
--- /dev/null
+++ b/common/include/BufferTracker.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_BUFFER_TRACKER_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_BUFFER_TRACKER_H
+
+#include <android-base/macros.h>
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <stack>
+#include <utility>
+#include <vector>
+
+#include "CpuExecutor.h"
+#include "HalInterfaces.h"
+#include "Utils.h"
+
+namespace android::nn {
+
+// This class manages a CPU buffer allocated on heap and provides validation methods.
+class ManagedBuffer {
+   public:
+    // Returns nullptr on allocation failure or for extension operand types.
+    static std::shared_ptr<ManagedBuffer> create(uint32_t size, std::set<PreparedModelRole> roles,
+                                                 const hal::Operand& operand);
+
+    // Prefer ManagedBuffer::create.
+    ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
+                  std::set<PreparedModelRole> roles, const hal::Operand& operand);
+
+    // Exposes the underlying heap buffer as a RunTimePoolInfo for execution.
+    RunTimePoolInfo createRunTimePoolInfo() const {
+        return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize);
+    }
+
+    // "poolIndex" is the index of this buffer in the request.pools.
+    hal::ErrorStatus validateRequest(uint32_t poolIndex, const hal::Request& request,
+                                     const hal::IPreparedModel* preparedModel) const;
+
+    // "size" is the byte size of the hidl_memory provided to the copyFrom or copyTo method.
+    hal::ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
+    hal::ErrorStatus validateCopyTo(uint32_t size) const;
+
+    // Merges "dimensions" with the initial dimensions; false if incompatible.
+    bool updateDimensions(const std::vector<uint32_t>& dimensions);
+    // Marks whether the buffer currently holds valid data.
+    void setInitialized(bool initialized);
+
+   private:
+    // Guards the mutable state below (mUpdatedDimensions, mInitialized).
+    mutable std::mutex mMutex;
+    const std::unique_ptr<uint8_t[]> kBuffer;
+    const uint32_t kSize;
+    // Roles declared at allocation time; requests are validated against them.
+    const std::set<PreparedModelRole> kRoles;
+    const hal::OperandType kOperandType;
+    const std::vector<uint32_t> kInitialDimensions;
+    // Current dimensions after the most recent successful updateDimensions().
+    std::vector<uint32_t> mUpdatedDimensions;
+    // True once the buffer has been marked as containing valid data.
+    bool mInitialized = false;
+};
+
+// Keep track of all ManagedBuffers and assign each with a unique token.
+class BufferTracker : public std::enable_shared_from_this<BufferTracker> {
+    DISALLOW_COPY_AND_ASSIGN(BufferTracker);
+
+   public:
+    // A RAII class to help manage the lifetime of the token.
+    // It is only supposed to be constructed in BufferTracker::add.
+    class Token {
+        DISALLOW_COPY_AND_ASSIGN(Token);
+
+       public:
+        // Keeps the tracker alive for at least as long as the token exists.
+        Token(uint32_t token, std::shared_ptr<BufferTracker> tracker)
+            : kToken(token), kBufferTracker(std::move(tracker)) {}
+        // Returns the token's slot to the tracker for reuse.
+        ~Token() { kBufferTracker->free(kToken); }
+        uint32_t get() const { return kToken; }
+
+       private:
+        const uint32_t kToken;
+        const std::shared_ptr<BufferTracker> kBufferTracker;
+    };
+
+    // The factory of BufferTracker. This ensures that the BufferTracker is always managed by a
+    // shared_ptr.
+    static std::shared_ptr<BufferTracker> create() { return std::make_shared<BufferTracker>(); }
+
+    // Prefer BufferTracker::create.
+    BufferTracker() : mTokenToBuffers(1) {}
+
+    // Registers a buffer; returns nullptr if "buffer" is null.
+    std::unique_ptr<Token> add(std::shared_ptr<ManagedBuffer> buffer);
+    // Looks up a buffer; returns nullptr for unknown or freed tokens.
+    std::shared_ptr<ManagedBuffer> get(uint32_t token) const;
+
+   private:
+    // Only called from Token's destructor.
+    void free(uint32_t token);
+
+    mutable std::mutex mMutex;
+    // Tokens released by free(), available for reuse by add().
+    std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens;
+
+    // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping.
+    // The index of the vector is the token. When the token gets freed, the corresponding entry is
+    // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token.
+    std::vector<std::shared_ptr<ManagedBuffer>> mTokenToBuffers;
+};
+
+}  // namespace android::nn
+
+#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_BUFFER_TRACKER_H
diff --git a/common/include/Utils.h b/common/include/Utils.h
index de4f811..5c64f6a 100644
--- a/common/include/Utils.h
+++ b/common/include/Utils.h
@@ -279,6 +279,7 @@
 //
 // Undefined behavior if the operand type is a scalar type.
 bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
+bool tensorHasUnspecifiedDimensions(hal::OperandType type, const std::vector<uint32_t>& dimensions);
 bool tensorHasUnspecifiedDimensions(const hal::Operand& operand);
 bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
 
diff --git a/common/include/ValidateHal.h b/common/include/ValidateHal.h
index af234ac..7b097fd 100644
--- a/common/include/ValidateHal.h
+++ b/common/include/ValidateHal.h
@@ -17,6 +17,9 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_VALIDATE_HAL_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_VALIDATE_HAL_H
 
+#include <set>
+#include <tuple>
+
 #include "HalInterfaces.h"
 
 namespace android {
@@ -31,6 +34,9 @@
     LATEST = V1_3,
 };
 
+// Identifies whether a buffer role refers to a model input or a model output.
+enum class IOType { INPUT, OUTPUT };
+// One declared usage of a device memory: (prepared model, input/output, index).
+using PreparedModelRole = std::tuple<const hal::IPreparedModel*, IOType, uint32_t>;
+
 // Verifies that the model is valid, i.e. it is consistent, takes
 // only acceptable values, the constants don't extend outside the memory
 // regions they are part of, etc.
@@ -40,7 +46,7 @@
 template <class T_Model>
 bool validateModel(const T_Model& model);
 
-// Verfies that the request for the given model is valid.
+// Verifies that the request for the given model is valid.
 // IMPORTANT: This function cannot validate that OEM operation and operands
 // are correctly defined, as these are specific to each implementation.
 // Each driver should do their own validation of OEM types.
@@ -51,10 +57,10 @@
 bool validateRequest(const T_Request& request, const T_Model& model,
                      bool allowUnspecifiedOutput = true);
 
-// Verfies that the execution preference is valid.
+// Verifies that the execution preference is valid.
 bool validateExecutionPreference(hal::ExecutionPreference preference);
 
-// Verfies that the priority is valid.
+// Verifies that the priority is valid.
 bool validatePriority(hal::Priority priority);
 
 bool validOperationType(hal::V1_0::OperationType operation);
@@ -65,10 +71,23 @@
 bool validOperandType(hal::V1_2::OperandType operand);
 bool validOperandType(hal::V1_3::OperandType operand);
 
-// Verfies that the memory pool is valid in the specified HAL version.
+// Verifies that the memory pool is valid in the specified HAL version.
 bool validatePool(const hal::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
 bool validatePool(const hal::V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST);
 
+// Verifies that the input arguments to IDevice::allocate are valid.
+// Optionally, this function can return a flattened prepared model roles and a combined operand.
+// Pass nullptr if either value is not needed.
+// IMPORTANT: This function cannot validate dimensions and extraParams with extension operand type.
+// Each driver should do their own validation of extension type dimensions and extraParams.
+bool validateMemoryDesc(
+        const hal::V1_3::BufferDesc& desc,
+        const hal::hidl_vec<sp<hal::V1_3::IPreparedModel>>& preparedModels,
+        const hal::hidl_vec<hal::V1_3::BufferRole>& inputRoles,
+        const hal::hidl_vec<hal::V1_3::BufferRole>& outputRoles,
+        std::function<const hal::V1_3::Model*(const sp<hal::V1_3::IPreparedModel>&)> getModel,
+        std::set<PreparedModelRole>* preparedModelRoles, hal::V1_3::Operand* combinedOperand);
+
 }  // namespace nn
 }  // namespace android