Add support for mmapped file descriptors

  - Create a new API, ANeuralNetworksMemory_createFromFd, to create an
    ANeuralNetworksMemory from an mmapped fd (a usage sketch follows below).
  - Add the corresponding Memory constructor to NeuralNetworksWrapper.h.
  - Add unit test TestFd for the new API.
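
A minimal usage sketch of the new C API (the file path, the helper function,
and its error handling are illustrative assumptions, not part of this change;
only ANeuralNetworksMemory_createFromFd, ANeuralNetworksMemory_free, and the
error codes come from NeuralNetworks.h):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #include "NeuralNetworks.h"

    // Map a (hypothetical) read-only weights file into an ANeuralNetworksMemory.
    int createWeightsMemory(const char* path, ANeuralNetworksMemory** outMemory) {
        int fd = open(path, O_RDONLY);
        if (fd < 0) return ANEURALNETWORKS_BAD_DATA;
        struct stat st;
        if (fstat(fd, &st) != 0) {
            close(fd);
            return ANEURALNETWORKS_BAD_DATA;
        }
        // The requested size must not exceed the file size; map the whole file.
        int n = ANeuralNetworksMemory_createFromFd(st.st_size, PROT_READ, fd, outMemory);
        // MemoryFd stores the fd without duplicating or closing it, so the fd
        // must stay valid while the memory object is in use; release the object
        // later with ANeuralNetworksMemory_free().
        if (n != ANEURALNETWORKS_NO_ERROR) close(fd);
        return n;
    }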

Bug: 63905942
Test: mm
Test: TestFd passes with the CPU path.
Test: TestFd passes on the sample HIDL driver, with ag/2588539

Change-Id: I1fcb0f0bd01e58bd592e18eff11c27034ee4a4c3
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4fdc98d..7379441 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -45,6 +45,7 @@
         android: {
             shared_libs: [
                 "libbase",
+                "libcutils",
                 "libhidlbase",
                 "libhidltransport",
                 "libhidlmemory",
diff --git a/runtime/Memory.h b/runtime/Memory.h
index ff74bfe..5ee1bb6 100644
--- a/runtime/Memory.h
+++ b/runtime/Memory.h
@@ -20,6 +20,8 @@
 #include "NeuralNetworks.h"
 #include "Utils.h"
 
+#include <cutils/native_handle.h>
+#include <sys/mman.h>
 #include <unordered_map>
 
 namespace android {
@@ -30,47 +32,102 @@
 // Represents a memory region.
 class Memory {
 public:
+    Memory() {}
+    virtual ~Memory() {}
+
+    // Disallow copy semantics to ensure the runtime object can only be freed
+    // once. Copy semantics could be enabled if some sort of reference counting
+    // or deep-copy system for runtime objects is added later.
+    Memory(const Memory&) = delete;
+    Memory& operator=(const Memory&) = delete;
+
     // Creates a shared memory object of the size specified in bytes.
     int create(uint32_t size);
 
-    /* TODO implement
-    int setFromHidlMemory(hardware::hidl_memory hidlMemory) {
-        mHidlMemory = hidlMemory;
-        mMemory = mapMemory(hidlMemory);
-        if (mMemory == nullptr) {
-            LOG(ERROR) << "setFromHidlMemory failed";
-            return ANEURALNETWORKS_OP_FAILED;
-        }
-        return ANEURALNETWORKS_NO_ERROR;
-    }
-    int setFromFd(int fd) {
-        return ANEURALNETWORKS_NO_ERROR;
-    }
-    int setFromGrallocBuffer(buffer_handle_t buffer,
-                             ANeuralNetworksMemory** memory) {
-        return ANEURALNETWORKS_NO_ERROR;
-    }
-    int setFromHardwareBuffer(AHardwareBuffer* buffer,
-                              ANeuralNetworksMemory** memory) {
-        return ANEURALNETWORKS_NO_ERROR;
-    }
-    */
-
     hardware::hidl_memory getHidlMemory() const { return mHidlMemory; }
 
     // Returns a pointer to the underlying memory of this memory object.
-    int getPointer(uint8_t** buffer) const {
+    virtual int getPointer(uint8_t** buffer) const {
         *buffer = static_cast<uint8_t*>(static_cast<void*>(mMemory->getPointer()));
         return ANEURALNETWORKS_NO_ERROR;
     }
 
-private:
+    virtual bool validateSize(uint32_t offset, uint32_t length) const {
+        // Widen to 64 bits so the sum cannot wrap around uint32_t.
+        if (static_cast<uint64_t>(offset) + length > mHidlMemory.size()) {
+            LOG(ERROR) << "Requested region exceeds the memory size.";
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+protected:
     // The hidl_memory handle for this shared memory.  We will pass this value when
     // communicating with the drivers.
     hardware::hidl_memory mHidlMemory;
     sp<IMemory> mMemory;
 };
 
+class MemoryFd : public Memory {
+public:
+    MemoryFd() {}
+    ~MemoryFd() {
+        // Delete the native_handle; this frees the handle itself but does not
+        // close the client-owned fd.
+        if (mHandle) {
+            native_handle_delete(mHandle);
+        }
+    }
+
+    // Disallow copy semantics to ensure the runtime object can only be freed
+    // once. Copy semantics could be enabled if some sort of reference counting
+    // or deep-copy system for runtime objects is added later.
+    MemoryFd(const MemoryFd&) = delete;
+    MemoryFd& operator=(const MemoryFd&) = delete;
+
+    // Creates a native_handle from the given size, prot, and fd, and points
+    // mHidlMemory at it. Any existing native_handle is deleted first. The fd
+    // is neither duplicated nor closed here; the caller retains ownership.
+    int set(size_t size, int prot, int fd) {
+        if (size == 0 || fd < 0) {
+            LOG(ERROR) << "Invalid size or fd";
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+
+        if (mHandle) {
+            native_handle_delete(mHandle);
+        }
+        mHandle = native_handle_create(1, 1);
+        if (mHandle == nullptr) {
+            LOG(ERROR) << "Failed to create native_handle";
+            return ANEURALNETWORKS_UNEXPECTED_NULL;
+        }
+        mHandle->data[0] = fd;
+        mHandle->data[1] = prot;
+        mHidlMemory = hidl_memory("mmap_fd", mHandle, size);
+        return ANEURALNETWORKS_NO_ERROR;
+    }
+
+    int getPointer(uint8_t** buffer) const override {
+        if (mHandle == nullptr) {
+            LOG(ERROR) << "Memory not initialized";
+            return ANEURALNETWORKS_UNEXPECTED_NULL;
+        }
+
+        // Map the file on demand using the fd and prot recorded in the handle.
+        // The mapping is not cached, so each call creates a new mapping.
+        int fd = mHandle->data[0];
+        int prot = mHandle->data[1];
+        void* data = mmap(nullptr, mHidlMemory.size(), prot, MAP_SHARED, fd, 0);
+        if (data == MAP_FAILED) {
+            LOG(ERROR) << "Can't mmap the file descriptor.";
+            return ANEURALNETWORKS_UNMAPPABLE;
+        } else {
+            *buffer = static_cast<uint8_t*>(data);
+            return ANEURALNETWORKS_NO_ERROR;
+        }
+    }
+
+private:
+    native_handle_t* mHandle = nullptr;
+};
+
 // A utility class to accumulate multiple Memory objects and assign each
 // a distinct index number, starting with 0.
 //
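
Not part of this change, but for context: a hedged sketch of how a receiving
process (for example the sample HIDL driver referenced in the Test: line) might
map a pool whose hidl_memory name is "mmap_fd", recovering the fd and prot
values packed by MemoryFd::set(). The helper name and error handling are
assumptions.

    #include <cutils/native_handle.h>
    #include <hidl/HidlSupport.h>
    #include <sys/mman.h>

    // Map an "mmap_fd" pool; returns nullptr on failure.
    uint8_t* mapMmapFdPool(const android::hardware::hidl_memory& pool) {
        if (pool.name() != "mmap_fd") return nullptr;
        const native_handle_t* handle = pool.handle();
        int fd = handle->data[0];    // fd stored by MemoryFd::set()
        int prot = handle->data[1];  // prot stored by MemoryFd::set()
        void* data = mmap(nullptr, pool.size(), prot, MAP_SHARED, fd, 0);
        return data == MAP_FAILED ? nullptr : static_cast<uint8_t*>(data);
    }
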
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 47535f8..2257dee 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -291,37 +291,18 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-/* TODO
-int ANeuralNetworksMemory_createFromHidlMemory(hidl_memory hidlMemory,
-                                               ANeuralNetworksMemory** memory) {
-    if (!memory) {
-        LOG(ERROR) << "ANeuralNetworksMemory_createFromHidlMemory passed a nullptr";
-        return ANEURALNETWORKS_UNEXPECTED_NULL;
-    }
-    *memory = nullptr;
-    std::unique_ptr<Memory> m = std::make_unique<Memory>(Memory());
-    if (m == nullptr) {
-        return ANEURALNETWORKS_OUT_OF_MEMORY;
-    }
-    int n = m->setFromHidlMemory(hidlMemory);
-    if (n != ANEURALNETWORKS_NO_ERROR) {
-        return n;
-    }
-    *memory = reinterpret_cast<ANeuralNetworksMemory*>(m.release());
-    return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksMemory_createFromFd(int fd, ANeuralNetworksMemory** memory) {
+int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd,
+                                       ANeuralNetworksMemory** memory) {
     if (fd < 0) {
         LOG(ERROR) << "ANeuralNetworksMemory_createFromFd invalid fd " << fd;
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
     *memory = nullptr;
-    std::unique_ptr<Memory> m = std::make_unique<Memory>(Memory());
+    std::unique_ptr<MemoryFd> m = std::make_unique<MemoryFd>();
     if (m == nullptr) {
         return ANEURALNETWORKS_OUT_OF_MEMORY;
     }
-    int n = m->setFromFd(fd);
+    int n = m->set(size, prot, fd);
     if (n != ANEURALNETWORKS_NO_ERROR) {
         return n;
     }
@@ -329,21 +310,6 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-int ANeuralNetworksMemory_createFromGrallocBuffer(buffer_handle_t buffer,
-                                                  ANeuralNetworksMemory** memory) {
-    *memory = nullptr;
-    // TODO implement
-    return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksMemory_createFromHardwareBuffer(AHardwareBuffer* buffer,
-                                                   ANeuralNetworksMemory** memory) {
-    *memory = nullptr;
-    // TODO implement
-    return ANEURALNETWORKS_NO_ERROR;
-}
-*/
-
 int ANeuralNetworksMemory_getPointer(ANeuralNetworksMemory* memory, uint8_t** buffer) {
     if (!memory || !buffer) {
         LOG(ERROR) << "ANeuralNetworksMemory_getPointer passed a nullptr";
diff --git a/runtime/RequestBuilder.cpp b/runtime/RequestBuilder.cpp
index ab027a1..1c7e4aa 100644
--- a/runtime/RequestBuilder.cpp
+++ b/runtime/RequestBuilder.cpp
@@ -106,6 +106,9 @@
                    << count;
         return ANEURALNETWORKS_BAD_DATA;
     }
+    if (!memory->validateSize(offset, length)) {
+        return ANEURALNETWORKS_BAD_DATA;
+    }
     uint32_t poolIndex = mMemories.add(memory);
     return mInputs[index].setFromMemory(mModel->getInputOperand(index), type, poolIndex, offset,
                                         length);
@@ -129,6 +132,9 @@
                    << count;
         return ANEURALNETWORKS_BAD_DATA;
     }
+    if (!memory->validateSize(offset, length)) {
+        return ANEURALNETWORKS_BAD_DATA;
+    }
     uint32_t poolIndex = mMemories.add(memory);
     return mOutputs[index].setFromMemory(mModel->getOutputOperand(index), type, poolIndex, offset,
                                          length);
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index cdd4092..9896c80 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -330,15 +330,24 @@
  */
 int ANeuralNetworksMemory_createShared(size_t size, ANeuralNetworksMemory** memory);
 
-/* TODO Should we also have from Surface, IONBuffer, ashmem and:
-int ANeuralNetworksMemory_createFromHidlMemory(android::hardware::hidl_memory hidlMemory,
-                                               ANeuralNetworksMemory** memory);
-int ANeuralNetworksMemory_createFromFd(int fd, ANeuralNetworksMemory** memory);
-int ANeuralNetworksMemory_createFromGrallocBuffer(buffer_handle_t buffer,
-                                                  ANeuralNetworksMemory** memory);
-int ANeuralNetworksMemory_createFromHardwareBuffer(AHardwareBuffer* buffer,
-                                                   ANeuralNetworksMemory** memory);
-*/
+/**
+ * Creates a shared memory object from a file descriptor.
+ *
+ * The shared memory is backed by a file descriptor via mmap.
+ * See {@link ANeuralNetworksMemory} for a description of how to use
+ * this shared memory.
+ *
+ * @param size The requested size in bytes.
+ *             Must not be larger than the file size.
+ * @param protect The desired memory protection for the mapping, as passed to
+ *                mmap (e.g. PROT_READ).
+ * @param fd The file descriptor of the file to be mapped.
+ * @param memory The memory object to be created.
+ *               Set to NULL if unsuccessful.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
+ */
+int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd,
+                                       ANeuralNetworksMemory** memory);
 
 /**
  * Returns pointer to the memory.
diff --git a/runtime/include/NeuralNetworksWrapper.h b/runtime/include/NeuralNetworksWrapper.h
index 2ddf563..c3e8704 100644
--- a/runtime/include/NeuralNetworksWrapper.h
+++ b/runtime/include/NeuralNetworksWrapper.h
@@ -99,10 +99,14 @@
 
 class Memory {
 public:
-    // TODO Also have constructors for file descriptor, gralloc buffers, etc.
     Memory(size_t size) {
         mValid = ANeuralNetworksMemory_createShared(size, &mMemory) == ANEURALNETWORKS_NO_ERROR;
     }
+    Memory(size_t size, int protect, int fd) {
+        mValid = ANeuralNetworksMemory_createFromFd(size, protect, fd, &mMemory) ==
+                         ANEURALNETWORKS_NO_ERROR;
+    }
+
     ~Memory() { ANeuralNetworksMemory_free(mMemory); }
     Result getPointer(uint8_t** buffer) {
         return static_cast<Result>(ANeuralNetworksMemory_getPointer(mMemory, buffer));
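
For context, a hedged sketch of how the new wrapper constructor can feed model
weights from a file-backed Memory; the helper, its parameters, and the
offset/length values are placeholders (the updated TestFd below exercises the
real path), and the android::nn::wrapper namespace is assumed from
NeuralNetworksWrapper.h:

    #include <sys/mman.h>

    #include "NeuralNetworksWrapper.h"

    using android::nn::wrapper::Memory;
    using android::nn::wrapper::Model;

    // Attach a region of a file-backed Memory as the value of an existing model
    // operand. The Memory (and the fd behind it) must outlive any use of the model.
    bool setWeightsFromFileMemory(Model* model, uint32_t operandIndex,
                                  Memory* weights, uint32_t offset, uint32_t length) {
        if (!weights->isValid()) return false;
        // The operand value is read from [offset, offset + length) of the mmapped file.
        model->setOperandValueFromMemory(operandIndex, weights, offset, length);
        return true;
    }
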
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp
index d97a033..3d2415d 100644
--- a/runtime/test/Android.bp
+++ b/runtime/test/Android.bp
@@ -62,4 +62,8 @@
         "libneuralnetworks",
         "libneuralnetworks_common",
     ],
+
+    shared_libs: [
+        "libcutils",
+    ],
 }
diff --git a/runtime/test/TestMemory.cpp b/runtime/test/TestMemory.cpp
index c1b0ac2..f1fbd48 100644
--- a/runtime/test/TestMemory.cpp
+++ b/runtime/test/TestMemory.cpp
@@ -17,6 +17,7 @@
 #include "NeuralNetworksWrapper.h"
 
 #include <gtest/gtest.h>
+#include <sys/mman.h>
 #include <sys/types.h>
 #include <unistd.h>
 
@@ -121,11 +122,9 @@
     ASSERT_EQ(CompareMatrices(expected3, *reinterpret_cast<Matrix3x4*>(data + offsetForActual)), 0);
 }
 
-/*
-// TODO Enable this test once we figure out how to pass fd across the HAL
 TEST_F(MemoryTest, TestFd) {
     // Create a file that contains matrix2 and matrix3.
-    char path[] = "/tmp/TestMemoryXXXXXX";
+    char path[] = "/data/local/tmp/TestMemoryXXXXXX";
     int fd = mkstemp(path);
     const uint32_t offsetForMatrix2 = 20;
     const uint32_t offsetForMatrix3 = 200;
@@ -136,7 +135,7 @@
     write(fd, matrix3, sizeof(matrix3));
     fsync(fd);
 
-    Memory weights(fd);
+    Memory weights(offsetForMatrix3 + sizeof(matrix3), PROT_READ, fd);
     ASSERT_TRUE(weights.isValid());
 
     Model model;
@@ -150,8 +149,8 @@
     auto e = model.addOperand(&matrixType);
     auto f = model.addOperand(&scalarType);
 
-    model.setOperandValueFromMemory(e, weights, offsetForMatrix2, sizeof(Matrix3x4));
-    model.setOperandValueFromMemory(a, weights, offsetForMatrix3, sizeof(Matrix3x4));
+    model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));
+    model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));
     model.setOperandValue(f, &activation, sizeof(activation));
     model.addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
     model.addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
@@ -170,6 +169,5 @@
     close(fd);
     unlink(path);
 }
-*/
 
 }  // end namespace