Pass the model as a HIDL component.

Major rework: instead of serializing the model and passing it
through shared memory, we now pass it via HIDL.

The runtime/test code does two runs: one goes through the
default CPU path, the other through any available driver.

Note: The code in hardware/../sample was written by Michael Buttler.

Bug: 63905942
Test: Runs through both the CPU path and the sample driver.
Change-Id: Ie3ee975d33056ba299895b13193f4698a690dd04
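
A minimal sketch of how the new entry point is expected to be driven
(illustrative only, not part of this change): the caller maps each
request pool into a RunTimePoolInfo and hands the HIDL Model and
Request directly to CpuExecutor::run(). The mapMemory()/getPointer()
idiom is an assumption based on the headers pulled in by
HalInterfaces.h.

    // Illustrative sketch only; not part of this patch.
    #include "CpuExecutor.h"
    #include "HalInterfaces.h"

    using namespace android::nn;

    int runOnCpu(const Model& model, const Request& request,
                 const hidl_vec<hidl_memory>& pools) {
        std::vector<RunTimePoolInfo> poolInfos;
        for (const auto& pool : pools) {
            RunTimePoolInfo info;
            info.memory = mapMemory(pool);  // from hidlmemory/mapping.h
            if (info.memory == nullptr) {
                return ANEURALNETWORKS_OP_FAILED;
            }
            // Assumed mapping idiom: getPointer() yields the mapped base address.
            info.buffer = static_cast<uint8_t*>(
                    static_cast<void*>(info.memory->getPointer()));
            poolInfos.push_back(info);
        }
        CpuExecutor executor;
        return executor.run(model, request, poolInfos);
    }
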
diff --git a/common/Android.bp b/common/Android.bp
index 8f00067..85aea54 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -14,10 +14,16 @@
  * limitations under the License.
  */
 
+cc_library_headers {
+    name: "libneuralnetworks_common_headers",
+    host_supported: false,
+    export_include_dirs: ["include"],
+}
+
 cc_library_static {
     name: "libneuralnetworks_common",
     defaults: ["neuralnetworks_defaults"],
-    host_supported: true,
+    host_supported: false,
     export_include_dirs: ["include"],
 
     srcs: [
@@ -26,7 +32,18 @@
         "OperationsUtils.cpp",
         "Utils.cpp",
     ],
-
+    shared_libs: [
+        "libbase",
+        "libhidlbase",
+        "libhidltransport",
+        "libhidlmemory",
+        "liblog",
+        "libnativehelper",
+        "libutils",
+        "[email protected]",
+        "[email protected]",
+        "[email protected]",
+    ],
     header_libs: [
         "libneuralnetworks_headers",
     ],
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 3539edc..485a779 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -18,7 +18,6 @@
 
 #include "CpuExecutor.h"
 
-#include "Model.h"
 #include "NeuralNetworks.h"
 #include "Operations.h"
 
@@ -28,133 +27,161 @@
 // If we don't have a buffer, allocate it.
 static bool allocateIfNeeded(RunTimeOperandInfo* info) {
     if (info->buffer == nullptr) {
-        uint32_t length =
-                sizeOfData(info->shape.type,
-                           Range<uint32_t>(info->shape.numberOfDimensions, info->shape.dimensions));
-        info->buffer = malloc(length);
+        uint32_t length = sizeOfData(info->type, info->dimensions);
+        info->buffer = new uint8_t[length];
+        if (info->buffer == nullptr) {
+            return false;
+        }
     }
     return true;
 }
 
-CpuExecutor::CpuExecutor(const IModel* model, const std::vector<InputOutputInfo>& modelInputs,
-                         const std::vector<InputOutputInfo>& modelOutputs)
-      : mModel(model) {
-    mModel->copyDimensionStorage(&mDimensions);
+// Ignore the .pools entry in model and request.  This will have been taken care of
+// by the caller.
+int CpuExecutor::run(const Model& model, const Request& request,
+                     const std::vector<RunTimePoolInfo>& runTimePoolInfos) {
+    LOG(DEBUG) << "CpuExecutor::run()";
+    LOG(DEBUG) << "model: " << toString(model);
+    LOG(DEBUG) << "request: " << toString(request);
 
-    const Range<OperandEntry> modelOperands = model->getOperands();
-    const size_t count = modelOperands.count();
-    mOperands.resize(count);
-    for (size_t i = 0; i < count; i++) {
-        const OperandEntry& from = modelOperands[i];
-        RunTimeOperandInfo& to = mOperands[i];
-        to.shape.type = from.type;
-        to.shape.numberOfDimensions = from.dimensions.count;
-        // It's safe to take the address. The size of mDimensions won't change.
-        to.shape.dimensions = &mDimensions[from.dimensions.offset];
-        if (from.location.pool == LOCATION_AT_RUN_TIME) {
-            to.buffer = nullptr;
-            to.numberOfUsesLeft = from.numberOfConsumers;
-        } else if (from.location.pool == LOCATION_SAME_BLOCK) {
-            to.buffer = const_cast<void*>(mModel->getDataPointer(from.location.offset));
-            to.numberOfUsesLeft = 0;
-        } else {
-            // TODO: Revisit when we add support for multiple pools.
-            nnAssert(false);
-        }
-        to.length = from.length;
-    }
-
-    for (uint32_t i = 0; i < modelInputs.size(); i++) {
-        overrideOperand(mModel->getInputOperandIndex(i), modelInputs[i]);
-    }
-    for (uint32_t i = 0; i < modelOutputs.size(); i++) {
-        overrideOperand(mModel->getOutputOperandIndex(i), modelOutputs[i]);
-    }
-}
-
-int CpuExecutor::run() {
+    mModel = &model;
+    mRequest = &request; // TODO check if mRequest is needed
+    initializeRunTimeInfo(runTimePoolInfos);
     // The model has serialized the operation in execution order.
-    for (const auto& operation : mModel->getOperations()) {
+    for (const auto& operation : model.operations) {
         int n = executeOperation(operation);
         if (n != ANEURALNETWORKS_NO_ERROR) {
             return n;
         }
     }
+    mModel = nullptr;
+    mRequest = nullptr;
+    LOG(DEBUG) << "Completed run normally";
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-void CpuExecutor::overrideOperand(uint32_t operandIndex, const InputOutputInfo& from) {
-    RunTimeOperandInfo& to = mOperands[operandIndex];
-    if (from.dimensionChanged) {
-        nnAssert(to.shape.numberOfDimensions == from.dimensions.size());
-        for (uint32_t i = 0; i < to.shape.numberOfDimensions; i++) {
-            to.shape.dimensions[i] = from.dimensions[i];
+bool CpuExecutor::initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& runTimePoolInfos) {
+    LOG(DEBUG) << "CpuExecutor::initializeRunTimeInfo";
+    const size_t count = mModel->operands.size();
+    mOperands.resize(count);
+    for (size_t i = 0; i < count; i++) {
+        const Operand& from = mModel->operands[i];
+        if (!setRunTimeOperandInfo(i, from.dimensions, from.location, from.numberOfConsumers,
+                                   runTimePoolInfos)) {
+            return false;
+        }
+        mOperands[i].type = from.type;
+    }
+
+    nnAssert(mModel->inputIndexes.size() == mRequest->inputs.size());
+    for (size_t i = 0; i < mModel->inputIndexes.size(); i++) {
+        const InputOutputInfo& from = mRequest->inputs[i];
+        if (!setRunTimeOperandInfo(mModel->inputIndexes[i], from.dimensions, from.location, 0,
+                                   runTimePoolInfos)) {
+            return false;
         }
     }
-    nnAssert(to.buffer == nullptr);
-    to.buffer = from.buffer;
-    to.length = from.length;
-    to.numberOfUsesLeft = 0;
+    nnAssert(mModel->outputIndexes.size() == mRequest->outputs.size());
+    for (size_t i = 0; i < mModel->outputIndexes.size(); i++) {
+        const InputOutputInfo& from = mRequest->outputs[i];
+        if (!setRunTimeOperandInfo(mModel->outputIndexes[i], from.dimensions, from.location, 0,
+                                   runTimePoolInfos)) {
+            return false;
+        }
+    }
+    return true;
 }
 
-void CpuExecutor::freeNoLongerUsedOperands(const Range<uint32_t>& inputs) {
+bool CpuExecutor::setRunTimeOperandInfo(uint32_t operandIndex,
+                                        const std::vector<uint32_t>& dimensions,
+                                        const DataLocation& location, uint32_t useCount,
+                                        const std::vector<RunTimePoolInfo>& runTimePoolInfos) {
+    LOG(DEBUG) << "CpuExecutor::setRunTimeOperand(" << operandIndex << ", " << toString(dimensions)
+               << ", " << toString(location) << ")";
+
+    RunTimeOperandInfo& to = mOperands[operandIndex];
+    if (dimensions.size() > 0) {
+        to.dimensions = dimensions;
+    }
+    if (location.poolIndex == static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME)) {
+        to.buffer = nullptr;
+        to.numberOfUsesLeft = useCount;
+    } else if (location.poolIndex == static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK)) {
+        to.buffer = const_cast<uint8_t*>(&mModel->operandValues[location.offset]);
+        to.numberOfUsesLeft = 0;
+    } else {
+        if (location.poolIndex >= runTimePoolInfos.size()) {
+            LOG(ERROR) << "For operand " << operandIndex << ", got a poolIndex id "
+                       << location.poolIndex << " which is >= " << runTimePoolInfos.size();
+            return false;
+        }
+        auto& r = runTimePoolInfos[location.poolIndex];
+        to.buffer = r.buffer + location.offset;
+        to.numberOfUsesLeft = 0;
+    }
+    to.length = location.length;
+    return true;
+}
+
+void CpuExecutor::freeNoLongerUsedOperands(const std::vector<uint32_t>& inputs) {
     for (uint32_t i : inputs) {
         auto& info = mOperands[i];
         // Check if it's a static or model input/output.
         if (info.numberOfUsesLeft == 0) {
             continue;
         }
-        nnAssert(mModel->getOperands()[i].location.pool == LOCATION_AT_RUN_TIME);
+        nnAssert(mModel->operands[i].location.poolIndex ==
+                 static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME));
         info.numberOfUsesLeft--;
         if (info.numberOfUsesLeft == 0) {
-            auto* buffer = mOperands[i].buffer;
-            nnAssert(buffer != nullptr);
-            free(buffer);
-            buffer = nullptr;
+            nnAssert(info.buffer != nullptr);
+            delete[] info.buffer;
+            info.buffer = nullptr;
         }
     }
 }
 
-int CpuExecutor::executeOperation(const OperationEntry& operation) {
-    ALOGI("Executing %s", getOperationName(operation.opCode));
-    const Range<uint32_t> ins = mModel->getOperandIndexes(operation.inputs);
-    const Range<uint32_t> outs = mModel->getOperandIndexes(operation.outputs);
+int CpuExecutor::executeOperation(const Operation& operation) {
+    LOG(DEBUG) << "CpuExecutor::executeOperation(" << toString(operation) << ")";
+    const auto& ins = operation.inputs;
+    const auto& outs = operation.outputs;
     bool success = false;
 
     // Function to verify that the number of input and output parameters
     // matches what is expected.
-    auto parameterCountIs = [&ins, &outs, &operation](uint32_t expectedIns,
-                                                      uint32_t expectedOuts) -> bool {
-        if (ins.count() != expectedIns || outs.count() != expectedOuts) {
-            ALOGE("%s: Invalid number of ins %u/%u and outs %u/%u",
-                  getOperationName(operation.opCode), ins.count(), expectedIns, outs.count(),
-                  expectedOuts);
+    auto parameterCountIs = [&ins, &outs, &operation](size_t expectedIns,
+                                                      size_t expectedOuts) -> bool {
+        if (ins.size() != expectedIns || outs.size() != expectedOuts) {
+            LOG(ERROR) << getOperationName(operation.type) << ": Invalid number of ins "
+                       << ins.size() << " / " << expectedIns << " and outs " << outs.size() << " / "
+                       << expectedOuts;
             return false;
         }
         return true;
     };
 
-    switch (static_cast<OperatorType>(operation.opCode)) {
-        case OperatorType::ADD_FLOAT32: {
+    switch (operation.type) {
+        case OperationType::ADD_FLOAT32: {
             if (!parameterCountIs(2, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
-            const RunTimeOperandInfo& in1 = mOperands[ins[0]];
-            const RunTimeOperandInfo& in2 = mOperands[ins[1]];
+            RunTimeOperandInfo& in1 = mOperands[ins[0]];
+            RunTimeOperandInfo& in2 = mOperands[ins[1]];
             RunTimeOperandInfo& out = mOperands[outs[0]];
+            Shape outShape = out.shape();
 
-            success = addTensorsFloat32Prepare(in1.shape, in2.shape, &out.shape) &&
+            success = addTensorsFloat32Prepare(in1.shape(), in2.shape(), &outShape) &&
                     allocateIfNeeded(&out) &&
                     addTensorsFloat32(reinterpret_cast<const float*>(in1.buffer),
                                       reinterpret_cast<const float*>(in2.buffer),
-                                      reinterpret_cast<float*>(out.buffer), in1.shape);
+                                      reinterpret_cast<float*>(out.buffer), outShape);
         } break;
         default:
             nnAssert(false);
             break;
     }
     if (!success) {
-        ALOGE("%s failed.", getOperationName(operation.opCode));
+        LOG(ERROR) << getOperationName(operation.type) << " failed.";
         return ANEURALNETWORKS_OP_FAILED;
     }
 
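
To make the operand bookkeeping above concrete, a small illustration
(hypothetical values; assumes HalInterfaces.h and that the HAL
OperandType enum mirrors the NN API constants): a TENSOR_FLOAT32
temporary of dimensions {2, 3} gets a 24-byte buffer from
allocateIfNeeded(), while a constant marked LOCATION_SAME_BLOCK is
served straight out of model.operandValues.

    // Hypothetical operands, for illustration only.
    Operand temporary;
    temporary.type = OperandType::TENSOR_FLOAT32;
    temporary.dimensions = {2, 3};
    temporary.location.poolIndex =
            static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME);
    // allocateIfNeeded() gives it sizeOfData(TENSOR_FLOAT32, {2, 3})
    // == 4 * 2 * 3 == 24 bytes, freed once numberOfUsesLeft reaches 0.

    Operand constant;
    constant.type = OperandType::TENSOR_FLOAT32;
    constant.location.poolIndex =
            static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK);
    constant.location.offset = 16;  // byte offset into model.operandValues
    constant.location.length = 24;
    // setRunTimeOperandInfo() points its buffer at &model.operandValues[16];
    // nothing is allocated or freed for it.
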
diff --git a/common/Operations.cpp b/common/Operations.cpp
index 7e727ec..36b1cb0 100644
--- a/common/Operations.cpp
+++ b/common/Operations.cpp
@@ -24,8 +24,8 @@
 namespace android {
 namespace nn {
 
-bool addTensorsFloat32Prepare(const Shape& in1, const Shape& in2, Shape* out1) {
-    return SameShape(in1, in2) && SetShape(in1, out1);
+bool addTensorsFloat32Prepare(const Shape& in1, const Shape& in2, Shape* out) {
+    return SameShape(in1, in2) && SetShape(in1, out);
 }
 
 bool addTensorsFloat32(const float* in1, const float* in2, float* out, const Shape& shape) {
@@ -36,5 +36,5 @@
     return true;
 }
 
-}  // namespace nn
-}  // namespace android
+} // namespace nn
+} // namespace android
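
A minimal usage sketch of the prepare/execute pair (illustrative
only; the Shape layout follows the RunTimeOperandInfo::shape()
accessor added to CpuExecutor.h, and SetShape() is assumed to copy
the first input's shape into the output):

    // Sketch only; assumes "Operations.h", "OperationsUtils.h" and
    // using namespace android::nn.
    uint32_t dims[2] = {2, 2};
    Shape a{.type = ANEURALNETWORKS_TENSOR_FLOAT32, .numberOfDimensions = 2, .dimensions = dims};
    Shape b = a;
    Shape out = {};
    float in1[4] = {1, 2, 3, 4};
    float in2[4] = {10, 20, 30, 40};
    float result[4];
    if (addTensorsFloat32Prepare(a, b, &out) &&      // validates shapes, sets out
        addTensorsFloat32(in1, in2, result, out)) {  // element-wise add
        // result is now {11, 22, 33, 44}.
    }
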
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index e9c232d..fa9b4ea 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -52,5 +52,17 @@
     return count;
 }
 
-}  // namespace nn
-}  // namespace android
+uint32_t getNumberOfDimensions(const Shape& shape) {
+    return shape.numberOfDimensions;
+}
+
+uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
+    if (dimensionIdx >= shape.numberOfDimensions) {
+        // TODO, log the error
+        return 0;
+    }
+    return shape.dimensions[dimensionIdx];
+}
+
+} // namespace nn
+} // namespace android
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 55f7726..a050dfd 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -17,94 +17,201 @@
 #define LOG_TAG "Utils"
 
 #include "Utils.h"
-
 #include "NeuralNetworks.h"
 
+#include <android-base/logging.h>
+
+using ::android::hidl::allocator::V1_0::IAllocator;
+
 namespace android {
 namespace nn {
 
-const char* typeNames[ANEURALNETWORKS_NUMBER_DATA_TYPES] = {
-            "FLOAT16",
-            "FLOAT32",
-            "INT8",
-            "UINT8",
-            "INT16",
-            "UINT16",
-            "INT32",
-            "UINT32",
-            "TENSOR_FLOAT16",
-            "TENSOR_FLOAT32",
-            "TENSOR_SIMMETRICAL_QUANT8",
+const char* kTypeNames[ANEURALNETWORKS_NUMBER_DATA_TYPES] = {
+        "FLOAT16",
+        "FLOAT32",
+        "INT8",
+        "UINT8",
+        "INT16",
+        "UINT16",
+        "INT32",
+        "UINT32",
+        "TENSOR_FLOAT16",
+        "TENSOR_FLOAT32",
+        "TENSOR_SIMMETRICAL_QUANT8",
 };
 
-const char* errorNames[] = {
-            "NO_ERROR",        "OUT_OF_MEMORY", "INCOMPLETE", "NULL", "BAD_DATA",
-            "NOT_IMPLEMENTED",  // TODO remove
+// TODO Check if this is useful
+const char* kErrorNames[] = {
+        "NO_ERROR",        "OUT_OF_MEMORY", "INCOMPLETE", "NULL", "BAD_DATA",
+        "NOT_IMPLEMENTED", // TODO remove
 };
 
 const char* kOperationNames[ANEURALNETWORKS_NUMBER_OPERATION_TYPES] = {
-            "AVERAGE_POOL_FLOAT32",
-            "CONCATENATION_FLOAT32",
-            "CONV_FLOAT32",
-            "DEPTHWISE_CONV_FLOAT32",
-            "MAX_POOL_FLOAT32",
-            "L2_POOL_FLOAT32",
-            "DEPTH_TO_SPACE_FLOAT32",
-            "SPACE_TO_DEPTH_FLOAT32",
-            "LOCAL_RESPONSE_NORMALIZATION_FLOAT32",
-            "SOFTMAX_FLOAT32",
-            "RESHAPE_FLOAT32",
-            "SPLIT_FLOAT32",
-            "FAKE_QUANT_FLOAT32",
-            "ADD_FLOAT32",
-            "FULLY_CONNECTED_FLOAT32",
-            "CAST_FLOAT32",
-            "MUL_FLOAT32",
-            "L2_NORMALIZATION_FLOAT32",
-            "LOGISTIC_FLOAT32",
-            "RELU_FLOAT32",
-            "RELU6_FLOAT32",
-            "RELU1_FLOAT32",
-            "TANH_FLOAT32",
-            "DEQUANTIZE_FLOAT32",
-            "FLOOR_FLOAT32",
-            "GATHER_FLOAT32",
-            "RESIZE_BILINEAR_FLOAT32",
-            "LSH_PROJECTION_FLOAT32",
-            "LSTM_FLOAT32",
-            "SVDF_FLOAT32",
-            "RNN_FLOAT32",
-            "N_GRAM_FLOAT32",
-            "LOOKUP_FLOAT32",
+        "AVERAGE_POOL_FLOAT32",
+        "CONCATENATION_FLOAT32",
+        "CONV_FLOAT32",
+        "DEPTHWISE_CONV_FLOAT32",
+        "MAX_POOL_FLOAT32",
+        "L2_POOL_FLOAT32",
+        "DEPTH_TO_SPACE_FLOAT32",
+        "SPACE_TO_DEPTH_FLOAT32",
+        "LOCAL_RESPONSE_NORMALIZATION_FLOAT32",
+        "SOFTMAX_FLOAT32",
+        "RESHAPE_FLOAT32",
+        "SPLIT_FLOAT32",
+        "FAKE_QUANT_FLOAT32",
+        "ADD_FLOAT32",
+        "FULLY_CONNECTED_FLOAT32",
+        "CAST_FLOAT32",
+        "MUL_FLOAT32",
+        "L2_NORMALIZATION_FLOAT32",
+        "LOGISTIC_FLOAT32",
+        "RELU_FLOAT32",
+        "RELU6_FLOAT32",
+        "RELU1_FLOAT32",
+        "TANH_FLOAT32",
+        "DEQUANTIZE_FLOAT32",
+        "FLOOR_FLOAT32",
+        "GATHER_FLOAT32",
+        "RESIZE_BILINEAR_FLOAT32",
+        "LSH_PROJECTION_FLOAT32",
+        "LSTM_FLOAT32",
+        "SVDF_FLOAT32",
+        "RNN_FLOAT32",
+        "N_GRAM_FLOAT32",
+        "LOOKUP_FLOAT32",
 };
 
-const char* getOperationName(uint32_t opCode) {
-    return kOperationNames[opCode];
+const char* getOperationName(OperationType type) {
+    uint32_t n = static_cast<uint32_t>(type);
+    nnAssert(n < ANEURALNETWORKS_NUMBER_OPERATION_TYPES);
+    return kOperationNames[n];
 }
 
-uint32_t sizeOfDataType[ANEURALNETWORKS_NUMBER_DATA_TYPES]{
-            2,  // ANEURALNETWORKS_FLOAT16
-            4,  // ANEURALNETWORKS_FLOAT32
-            1,  // ANEURALNETWORKS_INT8
-            1,  // ANEURALNETWORKS_UINT8
-            2,  // ANEURALNETWORKS_INT16
-            2,  // ANEURALNETWORKS_UINT16
-            4,  // ANEURALNETWORKS_INT32
-            4,  // ANEURALNETWORKS_UINT32
-            2,  // ANEURALNETWORKS_TENSOR_FLOAT16
-            4,  // ANEURALNETWORKS_TENSOR_FLOAT32
-            1   // ANEURALNETWORKS_TENSOR_SIMMETRICAL_QUANT8
+const uint32_t kSizeOfDataType[ANEURALNETWORKS_NUMBER_DATA_TYPES]{
+        2, // ANEURALNETWORKS_FLOAT16
+        4, // ANEURALNETWORKS_FLOAT32
+        1, // ANEURALNETWORKS_INT8
+        1, // ANEURALNETWORKS_UINT8
+        2, // ANEURALNETWORKS_INT16
+        2, // ANEURALNETWORKS_UINT16
+        4, // ANEURALNETWORKS_INT32
+        4, // ANEURALNETWORKS_UINT32
+        2, // ANEURALNETWORKS_TENSOR_FLOAT16
+        4, // ANEURALNETWORKS_TENSOR_FLOAT32
+        1  // ANEURALNETWORKS_TENSOR_SIMMETRICAL_QUANT8
 };
 
-uint32_t sizeOfData(uint32_t type, const Range<uint32_t>& dimensions) {
-    nnAssert(type < ANEURALNETWORKS_NUMBER_DATA_TYPES);
+uint32_t sizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
+    int n = static_cast<int>(type);
+    nnAssert(n < ANEURALNETWORKS_NUMBER_DATA_TYPES);
 
-    uint32_t size = sizeOfDataType[type];
+    uint32_t size = kSizeOfDataType[n];
     for (auto d : dimensions) {
         size *= d;
     }
     return size;
 }
 
-}  // namespace nn
-}  // namespace android
+hidl_memory allocateSharedMemory(int64_t size) {
+    hidl_memory memory;
+
+    // TODO: should we align memory size to nearest page? doesn't seem necessary...
+    const std::string& type = "ashmem";
+    sp<IAllocator> allocator = IAllocator::getService(type);
+    allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
+        if (!success) {
+            LOG(ERROR) << "unable to allocate " << size << " bytes of " << type;
+        } else {
+            memory = mem;
+        }
+    });
+
+    return memory;
+}
+
+uint32_t alignBytesNeeded(uint32_t index, size_t length) {
+    uint32_t pattern;
+    if (length < 2) {
+        pattern = 0; // No alignment necessary
+    } else if (length < 4) {
+        pattern = 1; // Align on 2-byte boundary
+    } else {
+        pattern = 3; // Align on 4-byte boundary
+    }
+    uint32_t extra = (~(index - 1)) & pattern;
+    return extra;
+}
+
+static bool validOperandIndexes(const hidl_vec<uint32_t>& indexes, size_t operandCount) {
+    for (uint32_t i : indexes) {
+        if (i >= operandCount) {
+            LOG(ERROR) << "Index out of range " << i << "/" << operandCount;
+            return false;
+        }
+    }
+    return true;
+}
+
+static bool validOperands(const hidl_vec<Operand>& operands, const hidl_vec<uint8_t>& operandValues,
+                          size_t poolCount) {
+    for (auto& operand : operands) {
+        if (static_cast<uint32_t>(operand.type) >= HAL_NUM_OPERAND_TYPES) {
+            LOG(ERROR) << "Invalid operand type " << toString(operand.type);
+            return false;
+        }
+        /* TODO validate dim with type
+        if (!validOperandIndexes(operand.dimensions, mDimensions)) {
+            return false;
+        }
+        */
+        if (operand.location.poolIndex ==
+            static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK)) {
+            if (operand.location.offset + operand.location.length > operandValues.size()) {
+                LOG(ERROR) << "OperandValue location out of range.  Starts at "
+                           << operand.location.offset << ", length " << operand.location.length
+                           << ", max " << operandValues.size();
+                return false;
+            }
+        } else if (operand.location.poolIndex ==
+                   static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME)) {
+            if (operand.location.offset != 0 || operand.location.length != 0) {
+                LOG(ERROR) << "Unexpected offset " << operand.location.offset << " or length "
+                           << operand.location.length << " for runtime location.";
+                return false;
+            }
+        } else if (operand.location.poolIndex >= poolCount) {
+            // TODO: Revisit when we add support for multiple pools.
+            LOG(ERROR) << "Invalid poolIndex " << operand.location.poolIndex << "/" << poolCount;
+            return false;
+        }
+    }
+    return true;
+}
+
+static bool validOperations(const hidl_vec<Operation>& operations, size_t operandCount) {
+    for (auto& op : operations) {
+        if (static_cast<uint32_t>(op.type) >= HAL_NUM_OPERATION_TYPES) {
+            LOG(ERROR) << "Invalid operation type " << toString(op.type);
+            return false;
+        }
+        if (!validOperandIndexes(op.inputs, operandCount) ||
+            !validOperandIndexes(op.outputs, operandCount)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// TODO doublecheck
+// TODO also do validateRequest
+bool validateModel(const Model& model) {
+    const size_t operandCount = model.operands.size();
+    return (validOperands(model.operands, model.operandValues, model.pools.size()) &&
+            validOperations(model.operations, operandCount) &&
+            validOperandIndexes(model.inputIndexes, operandCount) &&
+            validOperandIndexes(model.outputIndexes, operandCount));
+}
+
+} // namespace nn
+} // namespace android
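
For reference, a sketch of how a caller might use the two new helpers
together when building a model: small constant values get packed into
the flat operandValues blob with alignBytesNeeded(), and large buffers
get their own ashmem pool from allocateSharedMemory(). Names and sizes
below are hypothetical.

    // Sketch only; assumes "Utils.h" and using namespace android::nn.
    std::vector<uint8_t> operandValues;
    auto appendValue = [&operandValues](const void* data, uint32_t length) -> uint32_t {
        uint32_t padding =
                alignBytesNeeded(static_cast<uint32_t>(operandValues.size()), length);
        operandValues.insert(operandValues.end(), padding, 0);
        uint32_t offset = static_cast<uint32_t>(operandValues.size());
        const uint8_t* p = static_cast<const uint8_t*>(data);
        operandValues.insert(operandValues.end(), p, p + length);
        return offset;  // recorded as the operand's DataLocation.offset
    };

    int32_t activation = 0;
    uint32_t activationOffset = appendValue(&activation, sizeof(activation));

    // A large input or output buffer lives in its own pool instead:
    hidl_memory pool = allocateSharedMemory(1 << 20);  // 1 MiB, arbitrary size
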
diff --git a/common/include/CpuExecutor.h b/common/include/CpuExecutor.h
index ecccd7c..66d4b34 100644
--- a/common/include/CpuExecutor.h
+++ b/common/include/CpuExecutor.h
@@ -17,7 +17,7 @@
 #ifndef ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
 #define ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
 
-#include "HalAbstraction.h"
+#include "HalInterfaces.h"
 #include "OperationsUtils.h"
 #include "Utils.h"
 
@@ -26,19 +26,20 @@
 namespace android {
 namespace nn {
 
-class IModel;
-
-// Information we maintain about each operand during execution.
+// Information we maintain about each operand during execution that
+// may change during execution.
 struct RunTimeOperandInfo {
+    // TODO Storing the type here is redundant, as it won't change during execution.
+    OperandType type;
     // The type and dimensions of the operand.  The dimensions can
     // change at runtime.  We include the type because it's useful
     // to pass together with the dimension to the functions implementing
     // the operators.
-    Shape shape;
+    std::vector<uint32_t> dimensions;
     // Where the operand's data is stored.  Check the corresponding
     // location information in the model to figure out if this points
     // to memory we have allocated for a temporary operand.
-    void* buffer;
+    uint8_t* buffer;
     // The length of the buffer.
     uint32_t length;
     // Keeps track of how many operations have yet to make use
@@ -46,42 +47,61 @@
     // we free the buffer.  For non-temporary variables, this count is
     // always 0.
     uint32_t numberOfUsesLeft;
+
+    Shape shape() {
+        return Shape{.type = static_cast<uint32_t>(type),
+                     .numberOfDimensions = static_cast<uint32_t>(dimensions.size()),
+                     .dimensions = dimensions.data()};
+    }
+};
+
+struct RunTimePoolInfo {
+    sp<IMemory> memory;
+    uint8_t* buffer;
 };
 
 // This class is used to execute a model on the CPU.
 class CpuExecutor {
 public:
-    // The model must outlive the executor.  We prevent it from being modified
-    // while this is executing.
-    CpuExecutor(const IModel* model, const std::vector<InputOutputInfo>& modelInputs,
-                const std::vector<InputOutputInfo>& modelOutputs);
     // Executes the model. The results will be stored at the locations
     // specified in the constructor.
-    int run();
+    // The model must outlive the executor.  We prevent it from being modified
+    // while this is executing.
+    int run(const Model& model, const Request& request,
+            const std::vector<RunTimePoolInfo>& runTimePoolInfos);
 
 private:
+    bool initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& runTimePoolInfos);
     // Runs one operation of the graph.
-    int executeOperation(const OperationEntry& entry);
+    int executeOperation(const Operation& entry);
     // Decrement the usage count for the operands listed.  Frees the memory
     // allocated for any temporary variable with a count of zero.
-    void freeNoLongerUsedOperands(const Range<uint32_t>& inputs);
-
+    void freeNoLongerUsedOperands(const std::vector<uint32_t>& inputs);
+    void setLocationAndUses(RunTimeOperandInfo* to, const DataLocation& location,
+                            const std::vector<RunTimePoolInfo>& runTimePoolInfos);
+    bool setRunTimeOperandInfo(uint32_t operandIndex, const std::vector<uint32_t>& dimensions,
+                               const DataLocation& location, uint32_t useCount,
+                               const std::vector<RunTimePoolInfo>& runTimePoolInfos);
     // The operand is a model input or output.  Override the information that
     // came with the model with the one passed by the calling program.
-    void overrideOperand(uint32_t operandIndex, const InputOutputInfo& info);
+    // void overrideOperand(uint32_t operandIndex, const InputOutputInfo& info);
+    //  void overrideAddress(uint32_t operandIndex, void* buffer);
 
-    // The model that we'll execute.
-    const IModel* mModel;
+    // The model and the request that we'll execute. Only valid while run()
+    // is being executed.
+    const Model* mModel = nullptr;
+    const Request* mRequest = nullptr;
+
     // We're copying the list of all the dimensions from the model, as
     // these may be modified when we run the operations.  Since we're
     // making a full copy, the indexes used in the operand description
     // stay valid.
-    std::vector<uint32_t> mDimensions;
+    //    std::vector<uint32_t> mDimensions;
     // Runtime information about all the operands.
     std::vector<RunTimeOperandInfo> mOperands;
 };
 
-}  // namespace nn
-}  // namespace android
+} // namespace nn
+} // namespace android
 
-#endif  // ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
+#endif // ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
diff --git a/common/include/HalAbstraction.h b/common/include/HalAbstraction.h
deleted file mode 100644
index 9bf5a6c..0000000
--- a/common/include/HalAbstraction.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_ML_NN_COMMON_HAL_ABSTRACTION_H
-#define ANDROID_ML_NN_COMMON_HAL_ABSTRACTION_H
-
-#include <vector>
-
-// This class is used to abstract the HAL interface that will be created
-// HIDL gen from the HIDL files.  We may not need this long term, although
-// it is useful for running on a desktop without the HIDL compiler.
-
-namespace android {
-namespace nn {
-
-// The types the operands can take.  These must be the same value as the NN API>
-// TODO Use a single file for both.
-enum class DataType {
-    FLOAT16 = 0,
-    FLOAT32 = 1,
-    INT8 = 2,
-    UINT8 = 3,
-    INT16 = 4,
-    UINT16 = 5,
-    INT32 = 6,
-    UINT32 = 7,
-    TENSOR_FLOAT16 = 8,
-    TENSOR_FLOAT32 = 9,
-    TENSOR_SIMMETRICAL_QUANT8 = 10,
-
-    NUM_DATA_TYPES = 11
-};
-
-// TODO There's currently a 1:1 mapping with the NN API constants.
-// This will no longer be the case once an op supports more than one type.
-// We'll need to add a conversion when finalizing the model.
-enum class OperatorType {
-    AVERAGE_POOL_FLOAT32 = 0,
-    CONCATENATION_FLOAT32 = 1,
-    CONV_FLOAT32 = 2,
-    DEPTHWISE_CONV_FLOAT32 = 3,
-    MAX_POOL_FLOAT32 = 4,
-    L2_POOL_FLOAT32 = 5,
-    DEPTH_TO_SPACE_FLOAT32 = 6,
-    SPACE_TO_DEPTH_FLOAT32 = 7,
-    LOCAL_RESPONSE_NORMALIZATION_FLOAT32 = 8,
-    SOFTMAX_FLOAT32 = 9,
-    RESHAPE_FLOAT32 = 10,
-    SPLIT_FLOAT32 = 11,
-    FAKE_QUANT_FLOAT32 = 12,
-    ADD_FLOAT32 = 13,
-    FULLY_CONNECTED_FLOAT32 = 14,
-    CAST_FLOAT32 = 15,
-    MUL_FLOAT32 = 16,
-    L2_NORMALIZATION_FLOAT32 = 17,
-    LOGISTIC_FLOAT32 = 18,
-    RELU_FLOAT32 = 19,
-    RELU6_FLOAT32 = 20,
-    RELU1_FLOAT32 = 21,
-    TANH_FLOAT32 = 22,
-    DEQUANTIZE_FLOAT32 = 23,
-    FLOOR_FLOAT32 = 24,
-    GATHER_FLOAT32 = 25,
-    RESIZE_BILINEAR_FLOAT32 = 26,
-    LSH_PROJECTION_FLOAT32 = 27,
-    LSTM_FLOAT32 = 28,
-    SVDF_FLOAT32 = 29,
-    RNN_FLOAT32 = 30,
-    N_GRAM_FLOAT32 = 31,
-    LOOKUP_FLOAT32 = 32,
-
-    NUM_OPERATOR_TYPES = 33
-};
-
-// Status of a driver.
-enum Status { AVAILABLE, BUSY, OFFLINE, UNKNOWN };
-
-// Used by a driver to report its performance characteristics.
-// TODO revisit the data types and scales.
-struct PerformanceInfo {
-    float execTime;    // in nanoseconds
-    float powerUsage;  // in picoJoules
-};
-
-// Serialized representation of the model.
-struct SerializedModel {
-    std::vector<uint8_t> memory;
-};
-
-// The capabilities of a driver.
-struct Capabilities {
-    bool supportedOperatorTypes[static_cast<size_t>(OperatorType::NUM_OPERATOR_TYPES)];
-    // TODO Do the same for baseline model IDs
-    bool cachesCompilation;
-    // TODO revisit the data types and scales.
-    float bootupTime;  // in nanoseconds
-    PerformanceInfo float16Performance;
-    PerformanceInfo float32Performance;
-    PerformanceInfo quantized8Performance;
-};
-
-// Informaton about one input or output operand of a model.
-struct InputOutputInfo {
-    void* buffer;
-    uint32_t length;  // In bytes.
-    // If true, the calling program has provided different dimensions for the
-    // operand than was specified in the model.
-    bool dimensionChanged;
-    // The dimensions to use if the dimensions have been changed.
-    std::vector<uint32_t> dimensions;
-};
-
-// See the HAL files for documentation on these interfaces.
-class IEvent {
-public:
-    virtual ~IEvent(){}
-    virtual uint32_t wait() = 0;
-};
-
-class IRequest {
-public:
-    virtual ~IRequest(){}
-    virtual int execute(const std::vector<InputOutputInfo>& inputs,
-                        const std::vector<InputOutputInfo>& outputs, IEvent** event) = 0;
-    virtual void releaseTempMemory() = 0;
-};
-
-class IDevice {
-public:
-    virtual ~IDevice(){}
-    virtual void initialize(Capabilities* capabilities) = 0;
-    virtual void getSupportedSubgraph(void* graph, std::vector<bool>& canDo) = 0;
-    virtual int prepareRequest(const SerializedModel* model, IRequest** request) = 0;
-    virtual Status getStatus() = 0;
-};
-
-}  // namespace nn
-}  // namespace android
-
-#endif  // ANDROID_ML_NN_COMMON_HAL_ABSTRACTION_H
diff --git a/common/include/HalInterfaces.h b/common/include/HalInterfaces.h
new file mode 100644
index 0000000..8e341ab
--- /dev/null
+++ b/common/include/HalInterfaces.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_COMMON_HAL_INTERFACES_H
+#define ANDROID_ML_NN_COMMON_HAL_INTERFACES_H
+
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::neuralnetworks::V1_0::Capabilities;
+using ::android::hardware::neuralnetworks::V1_0::DataLocation;
+using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
+using ::android::hardware::neuralnetworks::V1_0::IDevice;
+using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_0::InputOutputInfo;
+using ::android::hardware::neuralnetworks::V1_0::LocationValues;
+using ::android::hardware::neuralnetworks::V1_0::Model;
+using ::android::hardware::neuralnetworks::V1_0::Operand;
+using ::android::hardware::neuralnetworks::V1_0::OperandType;
+using ::android::hardware::neuralnetworks::V1_0::Operation;
+using ::android::hardware::neuralnetworks::V1_0::OperationType;
+using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hidl::allocator::V1_0::IAllocator;
+using ::android::hidl::memory::V1_0::IMemory;
+
+const uint32_t HAL_NUM_OPERAND_TYPES = 11;
+const uint32_t HAL_NUM_OPERATION_TYPES = 33;
+
+#endif // ANDROID_ML_NN_COMMON_HAL_INTERFACES_H
diff --git a/common/include/HalModel.h b/common/include/HalModel.h
deleted file mode 100644
index 40247e1..0000000
--- a/common/include/HalModel.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_ML_NN_COMMON_HAL_MODEL_H
-#define ANDROID_ML_NN_COMMON_HAL_MODEL_H
-
-// This file contains the data structures that used to access the
-
-//namespace android {
-//namespace nn {
-
-#include <cstdint>
-#include <sys/cdefs.h>
-
-__BEGIN_DECLS
-
-// The location will be specified at runtime. It's either a temporary
-// variable, an input, or an output.
-const uint32_t LOCATION_AT_RUN_TIME = 0xFFFFFFFF;
-// The operand's value is in the same memory pool as the model.
-const uint32_t LOCATION_SAME_BLOCK = 0xFFFFFFFE;
-
-// Used to represent a variable length array.
-struct ArrayInfo {
-    // The number of elements of the array.
-    uint32_t count;
-    // The offset in whichevere data structure to find the first
-    // element of the array.  The unit type of the offset depends
-    // on the data structure it is indexing.
-    uint32_t offset;
-};
-
-// A serialized model starts with this block of memory.
-// TODO Look into alignment or padding issues.
-struct ModelHeader {
-    // Size and location of the operation table, an array of OperationEntry.
-    // The offset is the distance in bytes from the start of the header.
-    ArrayInfo operations;
-    // Size and location of the operand table, an array of OperandEntry.
-    // The offset is the distance in bytes from the start of the header.
-    ArrayInfo operands;
-    // Size and location of the table of dimensions, an array of uint32_t.
-    // The offset is the distance in bytes from the start of the header.
-    ArrayInfo dimensions;
-    // Size and location of the table of operand indexes, an array of uint32_t.
-    // The offset is the distance in bytes from the start of the header.
-    ArrayInfo operandIndexes;
-    // Size and location of the memory block containing all the fixed
-    // operand values.  The element type is uint8_t.
-    // The offset is the distance in bytes from the start of the header.
-    ArrayInfo operandValues;
-
-    // The list of operand indexes for the inputs of the model.
-    // The offset is an index in the operandIndexes table.
-    ArrayInfo modelInputs;
-    // The list of operand indexes for the outputs of the model.
-    // The offset is an index in the operandIndexes table.
-    ArrayInfo modelOutputs;
-};
-
-// Describes one operation of the graph.
-struct OperationEntry {
-    // The type of operation.
-    uint32_t opCode;
-    // Describes the table that contains the indexes of the inputs of the
-    // operation. The offset is the index in the operandIndexes table.
-    ArrayInfo inputs;
-    // Describes the table that contains the indexes of the outputs of the
-    // operation. The offset is the index in the operandIndexes table.
-    ArrayInfo outputs;
-};
-
-// Describes the location of a data object.
-struct DataLocation {
-    // The index of the memory pool where this location is found.
-    // Two special values can also be used.  See the LOCATION_* constants above.
-    uint32_t pool;
-    // Offset in bytes from the start of the pool.
-    uint32_t offset;
-};
-
-// Describes one operand of the graph.
-struct OperandEntry {
-    uint32_t type;
-    // The number of operations that uses this operand as input.
-    uint32_t numberOfConsumers;
-    // TODO handle quantization params.
-
-    // The following three fields maybe superseded at runtime.
-
-    // Dimensions of the operand.  The offset is an index in the dimensions table.
-    ArrayInfo dimensions;
-    // Where to find the data for this operand.
-    DataLocation location;
-    // The length of the data, in bytes.
-    uint32_t length;
-};
-
-__END_DECLS
-
-//}  // namespace nn
-//}  // namespace android
-
-#endif  // ANDROID_ML_NN_COMMON_HAL_MODEL_H
diff --git a/common/include/Model.h b/common/include/Model.h
deleted file mode 100644
index 0efc967..0000000
--- a/common/include/Model.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Interface used by the CpuExecutor to communicate with the two model
-// implementations.
-
-#ifndef ANDROID_ML_NN_COMMON_MODEL_BUILDER_H
-#define ANDROID_ML_NN_COMMON_MODEL_BUILDER_H
-
-#include "Utils.h"
-
-namespace android {
-namespace nn {
-
-class IModel {
-public:
-    virtual ~IModel() {}
-    virtual Range<OperationEntry> getOperations() const = 0;
-    virtual Range<OperandEntry> getOperands() const = 0;
-    virtual Range<uint32_t> getOperandIndexes(const ArrayInfo& info) const = 0;
-    virtual void copyDimensionStorage(std::vector<uint32_t>* dimensions) const = 0;
-    virtual uint32_t getInputOperandIndex(uint32_t listIndex) const = 0;
-    virtual uint32_t getOutputOperandIndex(uint32_t listIndex) const = 0;
-    virtual const void* getDataPointer(uint32_t offset) const = 0;
-};
-
-}  // namespace nn
-}  // namespace android
-
-#endif  // ANDROID_ML_NN_COMMON_MODEL_BUILDER_H
diff --git a/common/include/Operations.h b/common/include/Operations.h
index 56d1c92..4d05ef9 100644
--- a/common/include/Operations.h
+++ b/common/include/Operations.h
@@ -24,10 +24,10 @@
 
 struct Shape;
 
+bool addTensorsFloat32Prepare(const Shape& in1, const Shape& in2, Shape* out);
 bool addTensorsFloat32(const float* in1, const float* in2, float* out, const Shape& shape);
-bool addTensorsFloat32Prepare(const Shape& in1, const Shape& in2, Shape* out1);
 
-}  // namespace nn
-}  // namespace android
+} // namespace nn
+} // namespace android
 
-#endif  // ANDROID_ML_NN_COMMON_OPERATIONS_H
+#endif // ANDROID_ML_NN_COMMON_OPERATIONS_H
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h
index ec45912..ae53dd1 100644
--- a/common/include/OperationsUtils.h
+++ b/common/include/OperationsUtils.h
@@ -39,7 +39,11 @@
 // together. For a scalar, returns one.
 uint32_t getNumberOfElements(const Shape& shape);
 
-}  // namespace nn
-}  // namespace android
+uint32_t getNumberOfDimensions(const Shape& shape);
 
-#endif  // ANDROID_ML_NN_COMMON_OPERATIONS_UTILS_H
+uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_COMMON_OPERATIONS_UTILS_H
diff --git a/common/include/Utils.h b/common/include/Utils.h
index e3619e1..05e0278 100644
--- a/common/include/Utils.h
+++ b/common/include/Utils.h
@@ -17,91 +17,75 @@
 #ifndef ANDROID_ML_NN_COMMON_UTILS_H
 #define ANDROID_ML_NN_COMMON_UTILS_H
 
-#include "HalModel.h"
+#include "HalInterfaces.h"
+#include "NeuralNetworks.h"
 
-#include <stdio.h>
+#include <android-base/logging.h>
 #include <vector>
 
 namespace android {
 namespace nn {
 
-// TODO Replace with the real Android logging macros.
-#define ALOGE(format, ...) printf(LOG_TAG ": ERROR " format "\n", ##__VA_ARGS__)
-#define ALOGI(format, ...) printf(LOG_TAG ": " format "\n", ##__VA_ARGS__)
+// TODO Remove all the LOG(DEBUG) statements in all the files.
 
 // Assert macro, as Android does not generally support assert.
-#define nnAssert(v)                                                                       \
-    do {                                                                                  \
-        if (!(v)) {                                                                       \
-            fprintf(stderr, "nnAssert failed at %s:%d - '%s'\n", __FILE__, __LINE__, #v); \
-            abort();                                                                      \
-        }                                                                                 \
+#define nnAssert(v)                                                                            \
+    do {                                                                                       \
+        if (!(v)) {                                                                            \
+            LOG(ERROR) << "nnAssert failed at " << __FILE__ << ":" << __LINE__ << " - '" << #v \
+                       << "'\n";                                                               \
+            abort();                                                                           \
+        }                                                                                      \
     } while (0)
 
-// Represent a list of items.  Handy to iterate over lists and sublists.
-template <typename T>
-class Range {
-public:
-    // The default constructor should only be used when followed by a call
-    // to setFromBuffer.
-    Range() {}
-    // Range over all the elements of the vector.
-    Range(const std::vector<T>& data) {
-        mCount = static_cast<uint32_t>(data.size());
-        mBegin = data.data();
-    }
-    // Range over the sublist of elements of the vector, as specified by info.
-    Range(const std::vector<T>& data, const ArrayInfo& info) {
-        mCount = info.count;
-        mBegin = data.data() + info.offset;
-    }
-    // Range over the sublist of the range, as specified by info.
-    Range(const Range<T>& data, const ArrayInfo& info) {
-        mCount = info.count;
-        mBegin = data.begin() + info.offset;
-    }
-    // Range of the specified number of elements, starting at the specified value.
-    Range(uint32_t count, T* start) {
-        mCount = count;
-        mBegin = start;
-    }
-
-    // Range over consecutive elements starting at buffer + info.offset.
-    void setFromBuffer(const ArrayInfo& info, const uint8_t* buffer) {
-        mCount = info.count;
-        mBegin = reinterpret_cast<const T*>(buffer + info.offset);
-    }
-
-    // These two methods enable the use of for(x:Range(..)).
-    const T* begin() const { return mBegin; }
-    const T* end() const { return mBegin + mCount; }
-
-    // Returns the element at the specifed index.
-    T operator[](uint32_t index) const {
-        nnAssert(index < mCount);
-        return mBegin[index];
-    }
-    // All our ranges are read-only.  If we need to write, use this:
-    // uint32_t& operator[] (uint32_t index) {
-    //    nnAssert(index < mCount);
-    //    return mBegin[index];
-    // }
-
-    uint32_t count() const { return mCount; }
-
-private:
-    const T* mBegin = nullptr;  // The start of the range.
-    uint32_t mCount = 0;        // The number of elements in the range.
-};
-
 // Returns the amount of space needed to store a tensor of the specified
 // dimensions and type.
-uint32_t sizeOfData(uint32_t type, const Range<uint32_t>& dimensions);
+uint32_t sizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
 
 // Returns the name of the operation in ASCII.
-const char* getOperationName(uint32_t opCode);
+const char* getOperationName(OperationType opCode);
 
-}  // namespace nn
-}  // namespace android
+hidl_memory allocateSharedMemory(int64_t size);
 
-#endif  // ANDROID_ML_NN_COMMON_UTILS_H
+// Returns the number of padding bytes needed to align data of the
+// specified length.  It aligns objects of length:
+// 2, 3 on a 2 byte boundary,
+// 4+ on a 4 byte boundary.
+// We may want to have different alignments for tensors.
+// TODO: This is arbitrary, more a proof of concept.  We need
+// to determine what this should be.
+uint32_t alignBytesNeeded(uint32_t index, size_t length);
+
+inline void setFromIntList(hidl_vec<uint32_t>* vec, const ANeuralNetworksIntList& list) {
+    vec->resize(list.count);
+    for (uint32_t i = 0; i < list.count; i++) {
+        (*vec)[i] = list.data[i];
+    }
+}
+
+inline void setFromIntList(std::vector<uint32_t>* vec, const ANeuralNetworksIntList& list) {
+    vec->resize(list.count);
+    for (uint32_t i = 0; i < list.count; i++) {
+        (*vec)[i] = list.data[i];
+    }
+}
+
+inline std::string toString(uint32_t obj) {
+    return std::to_string(obj);
+}
+
+template <typename Type>
+std::string toString(const std::vector<Type>& range) {
+    std::string os = "[";
+    for (size_t i = 0; i < range.size(); ++i) {
+        os += (i == 0 ? "" : ", ") + toString(range[i]);
+    }
+    return os += "]";
+}
+
+bool validateModel(const Model& model);
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_COMMON_UTILS_H
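
A small usage sketch of the helpers declared above (illustrative
only): converting an NN API integer list into both representations
and logging it with the toString() overloads. The count/data field
names follow the uses of ANeuralNetworksIntList in this change.

    // Sketch only; assumes "Utils.h" and using namespace android::nn.
    uint32_t indexes[3] = {0, 1, 2};
    ANeuralNetworksIntList list;
    list.count = 3;
    list.data = indexes;

    hidl_vec<uint32_t> halInputs;
    setFromIntList(&halInputs, list);

    std::vector<uint32_t> inputs;
    setFromIntList(&inputs, list);
    LOG(DEBUG) << "inputs: " << toString(inputs);  // prints "[0, 1, 2]"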