First implementation of the Neural Networks API.
This first version can run a simple query on the CPU either
via the fallback path or through a simulated driver.
This code has many deficiencies: it is single threaded, not all
validation is done, it does not go through HIDL, and there are
not enough unit tests. Expect more changes!
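
For reference, the client-side call sequence this version supports looks
roughly like the sketch below. It is illustrative only: the exact operand
signature of each operation (here a hypothetical two-input ADD) is not
pinned down by this change.

    #include "NeuralNetworks.h"

    // Hypothetical two-input ADD model: operands 0 and 1 are the inputs,
    // operand 2 is the output.
    static void runSimpleAdd() {
        ANeuralNetworksInitialize();

        uint32_t dims[] = {2};
        // Field order: type, dimensions {count, data}, scale, offset.
        ANeuralNetworksOperandType floatTensor = {
                ANEURALNETWORKS_TENSOR_FLOAT32, {1, dims}, 0.f, 0.f};

        ANeuralNetworksModel* model = nullptr;
        ANeuralNetworksModel_create(&model);
        for (int i = 0; i < 3; i++) {
            ANeuralNetworksModel_addOperand(model, &floatTensor);
        }
        uint32_t inIdx[] = {0, 1}, outIdx[] = {2};
        ANeuralNetworksIntList ins = {2, inIdx}, outs = {1, outIdx};
        ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, &ins, &outs);
        // The op's inputs and outputs happen to be the model's too.
        ANeuralNetworksModel_setInputsAndOutputs(model, &ins, &outs);

        float in0[2] = {1, 2}, in1[2] = {3, 4}, out[2] = {};
        ANeuralNetworksRequest* request = nullptr;
        ANeuralNetworksRequest_create(model, &request);
        ANeuralNetworksRequest_setInput(request, 0, nullptr, in0, sizeof in0);
        ANeuralNetworksRequest_setInput(request, 1, nullptr, in1, sizeof in1);
        ANeuralNetworksRequest_setOutput(request, 0, nullptr, out, sizeof out);

        ANeuralNetworksEvent* event = nullptr;
        ANeuralNetworksRequest_startCompute(request, &event);
        ANeuralNetworksEvent_wait(event);

        ANeuralNetworksEvent_free(event);
        ANeuralNetworksRequest_free(request);
        ANeuralNetworksModel_free(model);
        ANeuralNetworksShutdown();
    }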
Test: Compiled and ran the unit tests
Change-Id: I9f6a485a2e7207aeb5f91a2904dcb4b7fd8a6f65
diff --git a/runtime/Android.bp b/runtime/Android.bp
new file mode 100644
index 0000000..af5be02
--- /dev/null
+++ b/runtime/Android.bp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_headers {
+ name: "libneuralnetworks_headers",
+ host_supported: true,
+ export_include_dirs: ["include"],
+}
+
+cc_library_headers {
+ name: "libneuralnetworks_private_headers",
+ host_supported: true,
+ export_include_dirs: ["."],
+}
+
+cc_library {
+ name: "libneuralnetworks",
+ defaults: ["neuralnetworks_defaults"],
+ host_supported: true,
+
+ srcs: [
+ "Manager.cpp",
+ "ModelBuilder.cpp",
+ "NeuralNetworks.cpp",
+ "Request.cpp",
+ ],
+
+ static_libs: [
+ "libneuralnetworks_common",
+ ],
+
+ header_libs: [
+ "libneuralnetworks_headers",
+ ],
+
+ export_header_lib_headers: [
+ "libneuralnetworks_headers",
+ ],
+}
+
+subdirs = ["test"]
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
new file mode 100644
index 0000000..d8333ed
--- /dev/null
+++ b/runtime/Manager.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Manager"
+
+#include "Manager.h"
+#include "Request.h"
+#include "Utils.h"
+
+namespace android {
+namespace nn {
+
+DriverManager DriverManager::manager;
+
+void DriverManager::initialize() {
+ if (mUsageCount++ == 0) {
+ // TODO query drivers for capabilities
+ }
+}
+
+void DriverManager::shutdown() {
+ nnAssert(mUsageCount > 0);
+ if (mUsageCount > 0) {
+ if (--mUsageCount == 0) {
+ mDrivers.clear();
+ }
+ }
+}
+
+} // namespace nn
+} // namespace android
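
The usage count above makes initialize/shutdown nest, so a library and the
application hosting it can each bracket their own use of the runtime. A
minimal sketch of the intended pairing:

    ANeuralNetworksInitialize();   // app:     count 0 -> 1, drivers discovered
    ANeuralNetworksInitialize();   // library: count 1 -> 2, no extra work
    // ... both use the runtime ...
    ANeuralNetworksShutdown();     // library: count 2 -> 1, drivers kept
    ANeuralNetworksShutdown();     // app:     count 1 -> 0, mDrivers cleared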
diff --git a/runtime/Manager.h b/runtime/Manager.h
new file mode 100644
index 0000000..8bb6d00
--- /dev/null
+++ b/runtime/Manager.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_RUNTIME_MANAGER_H
+#define ANDROID_ML_NN_RUNTIME_MANAGER_H
+
+#include "HalAbstraction.h"
+
+#include <vector>
+
+namespace android {
+namespace nn {
+
+// Manages the NN HAL drivers. Only one instance of this class will exist.
+// Use get() to retrieve it.
+class DriverManager {
+public:
+ // Initializes the manager: discover drivers, query for their capabilities, etc.
+ // This can be expensive, so we do it only when requested by the application.
+ void initialize();
+ void shutdown();
+
+ // Adds a driver for the manager to use.
+ void registerDriver(std::shared_ptr<IDevice> device) { mDrivers.push_back(device); }
+
+ // TODO For now, just return the first one.
+ std::shared_ptr<IDevice> getAvailableDriver() const { return mDrivers.empty() ? nullptr : mDrivers[0]; }
+
+ // Returns the singleton manager.
+ static DriverManager* get() { return &manager; }
+
+private:
+ // List of all the drivers currently discovered.
+ std::vector<std::shared_ptr<IDevice>> mDrivers;
+
+    // The number of times initialize() has been called. We reset the content
+    // of the manager once an equal number of calls to shutdown() have been made.
+ // This is done so that a library can call initialize and shutdown without
+ // interfering with other code.
+ //
+ // TODO Need to revisit this whole section when integrating with HIDL and
+ // ensuring multithreading is good. Consider std::atomic<int>.
+ int mUsageCount = 0;
+
+ static DriverManager manager;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_RUNTIME_MANAGER_H
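
Until HIDL discovery lands, a test can plug the simulated driver in by hand.
A sketch, assuming some SimulatedDriver class (not part of this patch) that
implements the IDevice interface from HalAbstraction.h:

    // Hypothetical: SimulatedDriver implements IDevice.
    auto driver = std::make_shared<SimulatedDriver>();
    android::nn::DriverManager::get()->registerDriver(driver);
    // getAvailableDriver() now returns this driver, so Request::startCompute()
    // takes the device path instead of the CPU fallback.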
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
new file mode 100644
index 0000000..7669fed
--- /dev/null
+++ b/runtime/ModelBuilder.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ModelBuilder"
+
+#include "ModelBuilder.h"
+
+#include "Request.h"
+#include "Utils.h"
+
+#include <map>
+#include <utility>
+
+namespace android {
+namespace nn {
+
+// Returns the number of padding bytes needed to align a buffer offset
+// ("index") for a value of the given length: none for single bytes, 2-byte
+// alignment for 16-bit values, 4-byte alignment for anything larger.
+static uint32_t alignBytesNeeded(uint32_t index, size_t length) {
+ uint32_t pattern;
+ if (length < 2) {
+ pattern = 0; // No alignment necessary
+ } else if (length < 4) {
+ pattern = 1; // Align on 2-byte boundary
+ } else {
+ pattern = 3; // Align on 4-byte boundary
+ }
+ uint32_t extra = (~(index - 1)) & pattern;
+ return extra;
+}
+
+static void storeIntList(const ANeuralNetworksIntList& from, std::vector<uint32_t>* into,
+ ArrayInfo* info) {
+ info->count = from.count;
+ if (from.count == 0) {
+ info->offset = 0;
+ } else {
+ size_t size = into->size();
+ info->offset = static_cast<uint32_t>(size); // TODO not the same as in file
+ into->reserve(size + from.count);
+ into->insert(into->end(), from.data, from.data + from.count);
+ }
+}
+
+int ModelBuilder::addOperand(const ANeuralNetworksOperandType& type) {
+ if (mCompletedModel) {
+ ALOGE("ANeuralNetworksModel_addOperand can't modify after request creation");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ size_t idx = operandCount();
+ if (idx >= MAX_NUMBER_OF_OPERANDS) {
+ ALOGE("ANeuralNetworksModel_addOperand exceed max operands");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ mOperands.resize(idx + 1);
+ auto& entry = mOperands[idx];
+ entry.type = type.type;
+ entry.numberOfConsumers = 0;
+ storeIntList(type.dimensions, &mDimensions, &entry.dimensions);
+ entry.location = {.pool = LOCATION_AT_RUN_TIME, .offset = 0};
+ entry.length = 0;
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ModelBuilder::setOperandValue(uint32_t index, const void* buffer, size_t length) {
+ if (index >= operandCount()) {
+ ALOGE("ANeuralNetworksModel_setOperandValue setting operand %u of %u", index,
+ operandCount());
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ OperandEntry& operand = mOperands[index];
+ uint32_t neededLength =
+ sizeOfData(operand.type, Range<uint32_t>(mDimensions, operand.dimensions));
+ if (neededLength != length) {
+ ALOGE("ANeuralNetworksModel_setOperandValue setting %zu bytes when needing "
+ "%u",
+ length, neededLength);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ uint32_t existingSize = static_cast<uint32_t>(mOperandValues.size());
+ uint32_t extraBytes = alignBytesNeeded(existingSize, length);
+ mOperandValues.resize(existingSize + extraBytes + length);
+ operand.location.offset = existingSize + extraBytes;
+ operand.location.pool = LOCATION_SAME_BLOCK;
+ memcpy(&mOperandValues[operand.location.offset], buffer, length);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ModelBuilder::addOperation(ANeuralNetworksOperationType type,
+ const ANeuralNetworksIntList* inputs,
+ const ANeuralNetworksIntList* outputs) {
+ if (mCompletedModel) {
+ ALOGE("ANeuralNetworksModel_addOperation can't modify after request "
+ "creation");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ uint32_t operationIndex = operationCount();
+ if (operationIndex >= MAX_NUMBER_OF_OPERATIONS) {
+ ALOGE("ANeuralNetworksModel_addOperation exceed max operations");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ mOperations.resize(operationIndex + 1);
+ auto& entry = mOperations[operationIndex];
+ entry.opCode = type;
+
+ storeIntList(*inputs, &mOperandIndexes, &entry.inputs);
+ storeIntList(*outputs, &mOperandIndexes, &entry.outputs);
+ for (uint32_t i = 0; i < inputs->count; i++) {
+ mOperands[inputs->data[i]].numberOfConsumers++;
+ }
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ModelBuilder::setInputsAndOutputs(const ANeuralNetworksIntList* inputs,
+ const ANeuralNetworksIntList* outputs) {
+ if (mCompletedModel) {
+ ALOGE("ANeuralNetworksModel_setInputsAndOutputs can't modify after request "
+ "creation");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ // TODO Validate all inputs
+ storeIntList(*inputs, &mOperandIndexes, &mModelInputs);
+ storeIntList(*outputs, &mOperandIndexes, &mModelOutputs);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ModelBuilder::loadBaseLineModel(uint32_t modelId) {
+ if (mCompletedModel) {
+ ALOGE("ANeuralNetworksModel_loadBaseLineModel can't modify after request "
+ "creation");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ // TODO implement
+ switch (modelId) {
+ case ANEURALNETWORKS_INCEPTION_SMALL_20_20:
+ case ANEURALNETWORKS_INCEPTION_LARGE_20_20:
+ case ANEURALNETWORKS_MOBILE_NETS_100_100:
+ break;
+ }
+ return ANEURALNETWORKS_NOT_IMPLEMENTED;
+}
+
+Request* ModelBuilder::createRequest() {
+ finishTheModel();
+ return new Request(this);
+}
+
+void ModelBuilder::finishTheModel() {
+ if (!mCompletedModel) {
+        // We sort the operations so that they are in the appropriate order
+        // for single-threaded, op-at-a-time execution.
+ sortIntoRunOrder();
+ mCompletedModel = true;
+ }
+}
+
+void ModelBuilder::sortIntoRunOrder() {
+ // Tracks the operations that can be executed.
+ std::vector<uint32_t> opsReadyToRun;
+ std::vector<OperationEntry> runOrder;
+
+ // Mark the inputs
+ for (auto i : getOperandIndexes(mModelInputs)) {
+        mOperands[i].location.pool = 0; // We'll reset it to unknown afterwards
+ }
+
+    // Maps an operand index to the operations that consume it.
+    std::multimap<uint32_t, uint32_t> operandToOperations;
+    // Tracks how many inputs of each operation are not yet computed.
+    std::vector<uint32_t> unknownInputCount(operationCount());
+ for (uint32_t operationIndex = 0; operationIndex < operationCount(); operationIndex++) {
+ uint32_t& count = unknownInputCount[operationIndex];
+ count = 0;
+ for (uint32_t operandIndex : getOperandIndexes(mOperations[operationIndex].inputs)) {
+ if (mOperands[operandIndex].location.pool == LOCATION_AT_RUN_TIME) {
+ count++;
+ operandToOperations.insert(
+ std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
+ }
+ }
+ if (count == 0) {
+ opsReadyToRun.push_back(operationIndex);
+ }
+ }
+ // TODO verify that a modelInput can't be set as output or vice-versa
+ // TODO test what happens when a model output is also used as input to an
+ // op!!!
+ for (auto i : getOperandIndexes(mModelInputs)) {
+ mOperands[i].location.pool = LOCATION_AT_RUN_TIME;
+ }
+
+ while (opsReadyToRun.size() > 0) {
+ // Execute the next op
+        uint32_t opIndex = opsReadyToRun.back();
+ opsReadyToRun.pop_back();
+ const OperationEntry& operation = mOperations[opIndex];
+
+ runOrder.push_back(mOperations[opIndex]);
+
+ // Mark all its output as known.
+ for (uint32_t operandIndex : getOperandIndexes(operation.outputs)) {
+ // const OperandEntry& output = mOperands[operandIndex];
+ auto range = operandToOperations.equal_range(operandIndex);
+ for (auto i = range.first; i != range.second; i++) {
+ uint32_t& count = unknownInputCount[i->second];
+ if (--count == 0) {
+ opsReadyToRun.push_back(i->second);
+ }
+ }
+ }
+ }
+ mOperations = runOrder;
+}
+
+void ModelBuilder::serialize(std::vector<uint8_t>* buffer) const {
+ auto roundUp = [](size_t x) { return (x + 0xF) & ~0xF; };
+
+ ModelHeader header;
+ header.modelInputs = mModelInputs;
+ header.modelOutputs = mModelOutputs;
+
+ header.operations.count = static_cast<uint32_t>(mOperations.size());
+ header.operands.count = static_cast<uint32_t>(mOperands.size());
+ header.dimensions.count = static_cast<uint32_t>(mDimensions.size());
+ header.operandIndexes.count = static_cast<uint32_t>(mOperandIndexes.size());
+ header.operandValues.count = static_cast<uint32_t>(mOperandValues.size());
+
+ size_t sizeOfHeader = sizeof(ModelHeader);
+ size_t sizeOfOperations = sizeof(OperationEntry) * header.operations.count;
+ size_t sizeOfOperands = sizeof(OperandEntry) * header.operands.count;
+ size_t sizeOfDimensions = sizeof(uint32_t) * header.dimensions.count;
+ size_t sizeOfOperandIndexes = sizeof(uint32_t) * header.operandIndexes.count;
+ size_t sizeOfOperandValues = sizeof(uint8_t) * header.operandValues.count;
+
+ size_t totalSize = 0;
+ auto addUp = [&totalSize, &roundUp](size_t length, ArrayInfo* info) {
+ info->offset = static_cast<uint32_t>(totalSize);
+ totalSize += roundUp(length);
+ };
+ ArrayInfo headerInfo;
+ addUp(sizeOfHeader, &headerInfo);
+ addUp(sizeOfOperations, &header.operations);
+ addUp(sizeOfOperands, &header.operands);
+ addUp(sizeOfDimensions, &header.dimensions);
+ addUp(sizeOfOperandIndexes, &header.operandIndexes);
+ addUp(sizeOfOperandValues, &header.operandValues);
+
+ buffer->resize(totalSize);
+ uint8_t* start = buffer->data();
+ auto copy = [start](size_t length, const void* from, const ArrayInfo& info) {
+ memcpy(start + info.offset, from, length);
+ };
+ copy(sizeOfHeader, &header, headerInfo);
+ copy(sizeOfOperations, mOperations.data(), header.operations);
+ copy(sizeOfOperands, mOperands.data(), header.operands);
+ copy(sizeOfDimensions, mDimensions.data(), header.dimensions);
+ copy(sizeOfOperandIndexes, mOperandIndexes.data(), header.operandIndexes);
+ copy(sizeOfOperandValues, mOperandValues.data(), header.operandValues);
+}
+
+} // namespace nn
+} // namespace android
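
The alignment rule in alignBytesNeeded() is easy to check in isolation: the
padding needed is (-index) & pattern, which ~(index - 1) computes without a
signed negation. A standalone sketch (not part of the patch):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static uint32_t alignBytesNeeded(uint32_t index, size_t length) {
        uint32_t pattern = length < 2 ? 0 : length < 4 ? 1 : 3;
        return (~(index - 1)) & pattern;  // same value as (-index) & pattern
    }

    int main() {
        assert(alignBytesNeeded(5, 4) == 3);  // pad 5 up to 8 for a 4-byte value
        assert(alignBytesNeeded(6, 2) == 0);  // 6 is already 2-byte aligned
        assert(alignBytesNeeded(7, 1) == 0);  // single bytes need no padding
        return 0;
    }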
diff --git a/runtime/ModelBuilder.h b/runtime/ModelBuilder.h
new file mode 100644
index 0000000..64fe16b
--- /dev/null
+++ b/runtime/ModelBuilder.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Class used to build a model through a succession of calls
+// to the NN API.
+
+#ifndef ANDROID_ML_NN_RUNTIME_MODEL_H
+#define ANDROID_ML_NN_RUNTIME_MODEL_H
+
+#include "Model.h"
+#include "NeuralNetworks.h"
+#include "Utils.h"
+
+namespace android {
+namespace nn {
+
+class Request;
+
+class ModelBuilder : public IModel {
+public:
+ virtual ~ModelBuilder() {}
+ // Adds an operand to the model.
+ int addOperand(const ANeuralNetworksOperandType& type);
+ int setOperandValue(uint32_t index, const void* buffer, size_t length);
+
+ int addOperation(ANeuralNetworksOperationType type, const ANeuralNetworksIntList* inputs,
+ const ANeuralNetworksIntList* outputs);
+ int setInputsAndOutputs(const ANeuralNetworksIntList* inputs,
+ const ANeuralNetworksIntList* outputs);
+ int loadBaseLineModel(uint32_t modelId);
+ Request* createRequest();
+
+ // Serialize the model into the buffer.
+ // TODO This should be a shared memory buffer instead, or a file.
+ void serialize(std::vector<uint8_t>* buffer) const;
+
+ uint32_t operandCount() const {
+ // We don't allow more than uint32_t worth of operands
+ return static_cast<uint32_t>(mOperands.size());
+ }
+ uint32_t operationCount() const {
+ // We don't allow more than uint32_t worth of operations
+ return static_cast<uint32_t>(mOperations.size());
+ }
+ uint32_t inputCount() const { return mModelInputs.count; }
+ uint32_t outputCount() const { return mModelOutputs.count; }
+ uint32_t getOperandType(uint32_t index) const { return mOperands[index].type; }
+ uint32_t getOperandNumberOfDimensions(uint32_t index) const {
+ return mOperands[index].dimensions.count;
+ }
+
+ // From IModel
+ virtual Range<OperationEntry> getOperations() const {
+ return Range<OperationEntry>(mOperations);
+ }
+ virtual Range<OperandEntry> getOperands() const { return Range<OperandEntry>(mOperands); }
+ virtual Range<uint32_t> getOperandIndexes(const ArrayInfo& info) const {
+ return Range<uint32_t>(mOperandIndexes, info);
+ }
+ virtual void copyDimensionStorage(std::vector<uint32_t>* dimensions) const {
+ *dimensions = mDimensions;
+ }
+ virtual uint32_t getInputOperandIndex(uint32_t listIndex) const {
+ return getOperandIndex(mModelInputs, listIndex);
+ }
+ virtual uint32_t getOutputOperandIndex(uint32_t listIndex) const {
+ return getOperandIndex(mModelOutputs, listIndex);
+ }
+ virtual const void* getDataPointer(uint32_t offset) const {
+ return mOperandValues.data() + offset;
+ }
+
+private:
+ // Sorts the operations to be in the correct order for single threaded
+ // node-at-a-time execution.
+ void sortIntoRunOrder();
+
+    uint32_t getOperandIndex(const ArrayInfo& info, uint32_t listIndex) const {
+ nnAssert(listIndex < info.count);
+ return mOperandIndexes[info.offset + listIndex];
+ }
+ void finishTheModel();
+
+ // The operations of the graph.
+ std::vector<OperationEntry> mOperations;
+ // The description of the operands of the graph.
+ std::vector<OperandEntry> mOperands;
+ // Used by OperandEntry to store arrays of dimension values.
+ std::vector<uint32_t> mDimensions;
+    // Used to store arrays of indexes into the mOperands table.
+ std::vector<uint32_t> mOperandIndexes;
+ // The value of the operands that are defined at model
+ // creation time.
+ // TODO We are copying all the values. Once we support memory
+ // pools, revisit.
+ std::vector<uint8_t> mOperandValues;
+ // Specifies where to find the list of indexes identifying
+ // the inputs and outputs of the model. The offset is into
+ // the mOperandIndexes table.
+ ArrayInfo mModelInputs;
+ ArrayInfo mModelOutputs;
+
+ // Once the request has been created, we should not allow further
+ // modifications to the model.
+ mutable bool mCompletedModel = false;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_RUNTIME_MODEL_H
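
The ArrayInfo-based accessors above all use one packed-slice scheme: a single
flat vector holds every index list, and an ArrayInfo names a slice of it via
its offset and count fields (ArrayInfo itself is defined in Model.h, outside
this section; the field names here follow their use in this patch). A sketch:

    // Two operations: op0 reads operands {0, 1} and writes {2};
    // op1 reads {1, 2} and writes {3}. All four lists share one vector.
    std::vector<uint32_t> operandIndexes = {0, 1,  2,  1, 2,  3};

    ArrayInfo op1Inputs;
    op1Inputs.offset = 3;  // op1's inputs start after op0's inputs and outputs
    op1Inputs.count = 2;   // the slice {1, 2}

    // getOperandIndex(op1Inputs, 0) == operandIndexes[3 + 0] == 1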
diff --git a/runtime/NOTICE b/runtime/NOTICE
new file mode 100644
index 0000000..2ba05c2
--- /dev/null
+++ b/runtime/NOTICE
@@ -0,0 +1,189 @@
+
+ Copyright (c) 2017, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
new file mode 100644
index 0000000..73b2346
--- /dev/null
+++ b/runtime/NeuralNetworks.cpp
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Contains all the entry points to the C Neural Networks API.
+// We do basic validation of the operands and then call the class
+// that implements the functionality.
+
+#define LOG_TAG "NeuralNetworks"
+
+#include "NeuralNetworks.h"
+#include "Manager.h"
+#include "ModelBuilder.h"
+#include "Request.h"
+
+#include <vector>
+
+using namespace android::nn;
+
+// Validates an operand type. When allowPartial is true, dimensions may be
+// left underspecified (zero).
+static int ValidateOperandType(const ANeuralNetworksOperandType& type, const char* tag,
+                               bool allowPartial) {
+ if (!allowPartial) {
+ for (uint32_t i = 0; i < type.dimensions.count; i++) {
+ if (type.dimensions.data[i] == 0) {
+ ALOGE("%s OperandType invalid dimensions[%u] = %u", tag, i,
+ type.dimensions.data[i]);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+ }
+ if (type.type >= ANEURALNETWORKS_NUMBER_DATA_TYPES) {
+ ALOGE("%s OperandType invalid type %u", tag, type.type);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ /* TODO validate the quantization info.
+ if (type.offset != 0.f && type.scale == 0.f) {
+ ALOGE("%s OperandType invalid offset %f and scale %f", tag, type.offset,
+ type.scale); return ANEURALNETWORKS_BAD_DATA;
+ }
+ if (type.scale != 0.f &&
+ (type.type == ANEURALNETWORKS_FLOAT16 ||
+ type.type != ANEURALNETWORKS_FLOAT32)) {
+ ALOGE("%s OperandType scale %f with float type %u", tag, type.scale,
+ type.type); return ANEURALNETWORKS_BAD_DATA;
+ }
+ */
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+static int ValidateOperandList(const ANeuralNetworksIntList& list, uint32_t count,
+ const char* tag) {
+ for (uint32_t i = 0; i < list.count; i++) {
+ if (list.data[i] >= count) {
+ ALOGE("%s invalid operand index at %u = %u, count %u", tag, i, list.data[i], count);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksInitialize() {
+ DriverManager::get()->initialize();
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksShutdown() {
+ DriverManager::get()->shutdown();
+}
+
+int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
+ if (!model) {
+ ALOGE("ANeuralNetworksModel_create passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ ModelBuilder* m = new ModelBuilder();
+ if (m == nullptr) {
+ *model = nullptr;
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ *model = reinterpret_cast<ANeuralNetworksModel*>(m);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_createBaselineModel(ANeuralNetworksModel** model, uint32_t modelId) {
+ if (!model) {
+ ALOGE("ANeuralNetworksModel_create passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ if (modelId >= ANEURALNETWORKS_NUMBER_BASELINE_MODELS) {
+ ALOGE("ANeuralNetworksModel_createBaselineModel invalid modelId %u", modelId);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ ModelBuilder* m = new ModelBuilder();
+ if (m == nullptr) {
+ *model = nullptr;
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ /* TODO uint32_t n = m->loadBaseLineModel(modelId);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ delete m;
+ return n;
+ }
+ */
+ *model = reinterpret_cast<ANeuralNetworksModel*>(m);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksModel_free(ANeuralNetworksModel* model) {
+ // No validation. Free of nullptr is valid.
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ delete m;
+}
+
+int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
+ const ANeuralNetworksOperandType* type) {
+ if (!model || !type) {
+ ALOGE("ANeuralNetworksModel_addOperand passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ int n = ValidateOperandType(*type, "ANeuralNetworksModel_addOperand", true);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ return m->addOperand(*type);
+}
+
+int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
+ const void* buffer, size_t length) {
+ if (!model || !buffer) {
+ ALOGE("ANeuralNetworksModel_setOperandValue passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ return m->setOperandValue(index, buffer, length);
+}
+
+int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
+ ANeuralNetworksOperationType type,
+ ANeuralNetworksIntList* inputs,
+ ANeuralNetworksIntList* outputs) {
+ if (!model || !inputs || !outputs) {
+ ALOGE("ANeuralNetworksModel_addOperation passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ if (type >= ANEURALNETWORKS_NUMBER_OPERATION_TYPES) {
+ ALOGE("ANeuralNetworksModel_addOperation invalid operations type %u", type);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ int n = ValidateOperandList(*inputs, m->operandCount(),
+ "ANeuralNetworksModel_addOperation inputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ n = ValidateOperandList(*outputs, m->operandCount(),
+ "ANeuralNetworksModel_addOperation outputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+
+ return m->addOperation(type, inputs, outputs);
+}
+
+int ANeuralNetworksModel_setInputsAndOutputs(ANeuralNetworksModel* model,
+ ANeuralNetworksIntList* inputs,
+ ANeuralNetworksIntList* outputs) {
+ if (!model || !inputs || !outputs) {
+ ALOGE("ANeuralNetworksModel_setInputsAndOutputs passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ int n = ValidateOperandList(*inputs, m->operandCount(),
+ "ANeuralNetworksModel_setInputsAndOutputs inputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ n = ValidateOperandList(*outputs, m->operandCount(),
+ "ANeuralNetworksModel_setInputsAndOutputs outputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+
+ return m->setInputsAndOutputs(inputs, outputs);
+}
+
+int ANeuralNetworksModel_addSubModel(ANeuralNetworksModel* model,
+ const ANeuralNetworksModel* submodel,
+ ANeuralNetworksIntList* inputs,
+ ANeuralNetworksIntList* outputs) {
+    if (!model || !submodel || !inputs || !outputs) {
+ ALOGE("ANeuralNetworksModel_addSubModel passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ int n = ValidateOperandList(*inputs, m->operandCount(),
+ "ANeuralNetworksModel_addSubModel inputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ n = ValidateOperandList(*outputs, m->operandCount(),
+ "ANeuralNetworksModel_addSubModel outputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ return ANEURALNETWORKS_NOT_IMPLEMENTED;
+}
+
+int ANeuralNetworksModel_setBaselineId(ANeuralNetworksModel* model, uint32_t baseLineId) {
+ if (!model) {
+ ALOGE("ANeuralNetworksModel_setBaselineId passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ if (baseLineId >= ANEURALNETWORKS_NUMBER_BASELINE_MODELS) {
+ ALOGE("ANeuralNetworksModel_setBaselineId invalid baselineId %u", baseLineId);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ // TODO implement
+ return ANEURALNETWORKS_NOT_IMPLEMENTED;
+}
+
+int ANeuralNetworksRequest_create(ANeuralNetworksModel* model, ANeuralNetworksRequest** request) {
+ if (!model || !request) {
+ ALOGE("ANeuralNetworksRequest_create passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
+ Request* r = m->createRequest();
+ if (r == nullptr) {
+ *request = nullptr;
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ *request = reinterpret_cast<ANeuralNetworksRequest*>(r);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksRequest_free(ANeuralNetworksRequest* request) {
+ // No validation. Free of nullptr is valid.
+ Request* r = reinterpret_cast<Request*>(request);
+ delete r;
+}
+
+int ANeuralNetworksRequest_setPreference(ANeuralNetworksRequest* request, uint32_t preference) {
+ if (!request) {
+ ALOGE("ANeuralNetworksRequest_setPreference passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ if (preference >= ANEURALNETWORKS_NUMBER_PREFERENCES) {
+ ALOGE("ANeuralNetworksRequest_setPreference invalid preference %u", preference);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ Request* r = reinterpret_cast<Request*>(request);
+ r->setPreference(preference);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksRequest_setInput(ANeuralNetworksRequest* request, int32_t index,
+ const ANeuralNetworksOperandType* type, const void* buffer,
+ size_t length) {
+ if (!request || !buffer) {
+ ALOGE("ANeuralNetworksRequest_setInput passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ if (type != nullptr) {
+ int n = ValidateOperandType(*type, "ANeuralNetworksRequest_setInput", false);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ }
+    if (length > 0xFFFFFFFF) {
+        ALOGE("ANeuralNetworksRequest_setInput input exceeds max length %zu", length);
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+ uint32_t l = static_cast<uint32_t>(length);
+ Request* r = reinterpret_cast<Request*>(request);
+ return r->setInput(index, type, buffer, l);
+}
+
+int ANeuralNetworksRequest_setInputFromHardwareBuffer(ANeuralNetworksRequest* request,
+ int32_t index,
+ const ANeuralNetworksOperandType* type,
+ const AHardwareBuffer* buffer) {
+ if (!request || !type || !buffer) {
+ ALOGE("ANeuralNetworksRequest_setInputFromHardwareBuffer passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ // TODO validate the rest
+
+ Request* r = reinterpret_cast<Request*>(request);
+ return r->setInputFromHardwareBuffer(index, type, buffer);
+}
+
+int ANeuralNetworksRequest_setOutput(ANeuralNetworksRequest* request, int32_t index,
+ const ANeuralNetworksOperandType* type, void* buffer,
+ size_t length) {
+ if (!request || !buffer) {
+ ALOGE("ANeuralNetworksRequest_setOutput passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ if (type != nullptr) {
+ int n = ValidateOperandType(*type, "ANeuralNetworksRequest_setOutput", false);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ }
+    if (length > 0xFFFFFFFF) {
+        ALOGE("ANeuralNetworksRequest_setOutput output exceeds max length %zu", length);
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+ uint32_t l = static_cast<uint32_t>(length);
+
+ Request* r = reinterpret_cast<Request*>(request);
+ return r->setOutput(index, type, buffer, l);
+}
+
+int ANeuralNetworksRequest_setOutputFromHardwareBuffer(ANeuralNetworksRequest* request,
+ int32_t index,
+ const ANeuralNetworksOperandType* type,
+ const AHardwareBuffer* buffer) {
+ if (!request || !type || !buffer) {
+ ALOGE("ANeuralNetworksRequest_setOutputFromHardwareBuffer passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ // TODO validate the rest
+
+ Request* r = reinterpret_cast<Request*>(request);
+ return r->setOutputFromHardwareBuffer(index, type, buffer);
+}
+
+int ANeuralNetworksRequest_startCompute(ANeuralNetworksRequest* request,
+ ANeuralNetworksEvent** event) {
+ if (!request || !event) {
+ ALOGE("ANeuralNetworksRequest_startCompute passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ // TODO validate the rest
+
+ Request* r = reinterpret_cast<Request*>(request);
+ Event* e = nullptr;
+ int n = r->startCompute(&e);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ *event = reinterpret_cast<ANeuralNetworksEvent*>(e);
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) {
+ if (event == nullptr) {
+ ALOGE("ANeuralNetworksEvent_wait passed a nullptr");
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ Event* e = reinterpret_cast<Event*>(event);
+ e->wait();
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) {
+ // No validation. Free of nullptr is valid.
+ Event* e = reinterpret_cast<Event*>(event);
+ delete e;
+}
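
The checks above reject malformed arguments before they reach ModelBuilder.
For instance, referencing an operand index that has not been added yet fails
in ValidateOperandList (a sketch; constants as defined in NeuralNetworks.h):

    ANeuralNetworksModel* model = nullptr;
    ANeuralNetworksModel_create(&model);

    uint32_t bad[] = {42};                        // no operand 42 exists yet
    ANeuralNetworksIntList inputs = {1, bad};
    ANeuralNetworksIntList outputs = {0, nullptr};
    int n = ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
                                              &inputs, &outputs);
    // n == ANEURALNETWORKS_BAD_DATA: 42 >= operandCount().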
diff --git a/runtime/Request.cpp b/runtime/Request.cpp
new file mode 100644
index 0000000..ee5bae0
--- /dev/null
+++ b/runtime/Request.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Request"
+
+#include "Request.h"
+
+#include "CpuExecutor.h"
+#include "Manager.h"
+#include "ModelBuilder.h"
+
+namespace android {
+namespace nn {
+
+Request::Request(const ModelBuilder* model) : mModel(model) {
+ mInputs.resize(model->inputCount());
+ for (auto& info : mInputs) {
+ info.buffer = nullptr;
+ info.length = 0;
+ }
+ mOutputs.resize(model->outputCount());
+ for (auto& info : mOutputs) {
+ info.buffer = nullptr;
+ info.length = 0;
+ }
+}
+
+int Request::setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer,
+ uint32_t length) {
+ uint32_t count = static_cast<uint32_t>(mInputs.size());
+ if (index >= count) {
+ ALOGE("ANeuralNetworksRequest_setInput bad index %u %u", index, count);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ updateModelInputOutputInfo(&mInputs[index], type, const_cast<void*>(buffer), length,
+ mModel->getInputOperandIndex(index));
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int Request::setInputFromHardwareBuffer([[maybe_unused]] uint32_t index,
+ [[maybe_unused]] const ANeuralNetworksOperandType* type,
+ [[maybe_unused]] const AHardwareBuffer* buffer) {
+ return ANEURALNETWORKS_NOT_IMPLEMENTED;
+}
+
+int Request::setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer,
+ uint32_t length) {
+ uint32_t count = static_cast<uint32_t>(mOutputs.size());
+ if (index >= count) {
+ ALOGE("ANeuralNetworksRequest_setOutput bad index %u %u", index, count);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ updateModelInputOutputInfo(&mOutputs[index], type, buffer, length,
+ mModel->getOutputOperandIndex(index));
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int Request::setOutputFromHardwareBuffer([[maybe_unused]] uint32_t index,
+ [[maybe_unused]] const ANeuralNetworksOperandType* type,
+ [[maybe_unused]] const AHardwareBuffer* buffer) {
+ return ANEURALNETWORKS_NOT_IMPLEMENTED;
+}
+
+int Request::updateModelInputOutputInfo(InputOutputInfo* info,
+ const ANeuralNetworksOperandType* newType, void* buffer,
+ uint32_t length, uint32_t operandIndex) {
+ info->buffer = buffer;
+ info->length = length;
+ info->dimensionChanged = newType != nullptr;
+ if (info->dimensionChanged) {
+ uint32_t count = newType->dimensions.count;
+ if (newType->type != mModel->getOperandType(operandIndex) ||
+ count != mModel->getOperandNumberOfDimensions(operandIndex)) {
+ ALOGE("ANeuralNetworksRequest_setInput/Output incompatible types");
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+        info->dimensions.assign(newType->dimensions.data,
+                                newType->dimensions.data + count);
+ }
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int Request::startCompute(Event** event) {
+    // TODO validate that we have full types for all inputs and outputs,
+    // and that the graph is not cyclic.
+ std::shared_ptr<IDevice> driver = DriverManager::get()->getAvailableDriver();
+ return driver == nullptr ? startComputeOnCpu(event) : startComputeOnDevice(driver, event);
+}
+
+int Request::startComputeOnDevice(std::shared_ptr<IDevice> driver, Event** event) {
+ SerializedModel model;
+ mModel->serialize(&model.memory);
+
+ IRequest* request = nullptr;
+    // TODO Dangerous! In async mode, this stack-allocated model could be
+    // destroyed while the driver still uses it. Safe for now.
+ int n = driver->prepareRequest(&model, &request);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+
+ IEvent* ievent = nullptr;
+ n = request->execute(mInputs, mOutputs, &ievent);
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ *event = new Event(); // TODO pass ievent
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int Request::startComputeOnCpu(Event** event) {
+ // TODO: use a thread pool
+ Event* e = new Event();
+ *event = e;
+
+ CpuExecutor executor(mModel, mInputs, mOutputs);
+ return executor.run();
+}
+
+} // namespace nn
+} // namespace android
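
Since Event::wait() is still a stub and execution is synchronous, a request
can already be reused by rebinding buffers, as the header comments permit
(same shape, new address). A sketch, assuming a request and a newInput buffer
set up as in the flow above:

    // Point input 0 at a new buffer and run the same request again.
    ANeuralNetworksRequest_setInput(request, 0, nullptr, newInput, sizeof newInput);
    ANeuralNetworksEvent* event = nullptr;
    ANeuralNetworksRequest_startCompute(request, &event);
    ANeuralNetworksEvent_wait(event);
    ANeuralNetworksEvent_free(event);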
diff --git a/runtime/Request.h b/runtime/Request.h
new file mode 100644
index 0000000..5ca33f3
--- /dev/null
+++ b/runtime/Request.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_RUNTIME_REQUEST_H
+#define ANDROID_ML_NN_RUNTIME_REQUEST_H
+
+#include <memory>
+#include <vector>
+
+#include "NeuralNetworks.h"
+
+namespace android {
+namespace nn {
+
+// TODO Have a real implementation for this class.
+class Event {
+public:
+ void wait() {}
+};
+
+class IDevice;
+class ModelBuilder;
+struct InputOutputInfo;
+
+class Request {
+public:
+ Request(const ModelBuilder* model);
+
+ void setPreference(uint32_t preference) { mPreference = preference; }
+
+ int setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer,
+ uint32_t length);
+ int setInputFromHardwareBuffer(uint32_t index, const ANeuralNetworksOperandType* type,
+ const AHardwareBuffer* buffer);
+ int setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer,
+ uint32_t length);
+ int setOutputFromHardwareBuffer(uint32_t index, const ANeuralNetworksOperandType* type,
+ const AHardwareBuffer* buffer);
+ int startCompute(Event** event);
+
+private:
+ int updateModelInputOutputInfo(InputOutputInfo* info, const ANeuralNetworksOperandType* newType,
+ void* buffer, uint32_t length, uint32_t operandIndex);
+
+ int startComputeOnDevice(std::shared_ptr<IDevice> driver, Event** event);
+ int startComputeOnCpu(Event** event);
+
+ const ModelBuilder* mModel;
+ // Whether the application prefers to go fast or use low power for this request.
+ uint32_t mPreference = ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER;
+
+ // The collected list of inputs and outputs of this request.
+ std::vector<InputOutputInfo> mInputs;
+ std::vector<InputOutputInfo> mOutputs;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_RUNTIME_REQUEST_H
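
Event is a placeholder, per the TODO above. One plausible shape for the real
thing, purely illustrative, is a condition variable that the compute worker
would signal on completion:

    #include <condition_variable>
    #include <mutex>

    class Event {
    public:
        void wait() {
            std::unique_lock<std::mutex> lock(mMutex);
            mCondition.wait(lock, [this] { return mSignaled; });
        }
        void signal() {
            {
                std::lock_guard<std::mutex> guard(mMutex);
                mSignaled = true;
            }
            mCondition.notify_all();
        }
    private:
        std::mutex mMutex;
        std::condition_variable mCondition;
        bool mSignaled = false;
    };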
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
new file mode 100644
index 0000000..be49208
--- /dev/null
+++ b/runtime/include/NeuralNetworks.h
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
+#define ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
+
+// TODO Before submitting to NDK, fix all the TODOs in here.
+
+// TODO Change this to 27 or better __ANDROID_API_O_MR1__
+#if __ANDROID_API__ >= __ANDROID_API_O__
+
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+/**
+ * Operand types.
+ *
+ * [TODO: Make sure these are compatible with TensorFlow Lite.]
+ */
+enum {
+ ANEURALNETWORKS_FLOAT16 = 0,
+ ANEURALNETWORKS_FLOAT32 = 1,
+ ANEURALNETWORKS_INT8 = 2,
+ ANEURALNETWORKS_UINT8 = 3,
+ ANEURALNETWORKS_INT16 = 4,
+ ANEURALNETWORKS_UINT16 = 5,
+ ANEURALNETWORKS_INT32 = 6,
+ ANEURALNETWORKS_UINT32 = 7,
+ ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
+ ANEURALNETWORKS_TENSOR_FLOAT32 = 9,
+ ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8 = 10,
+
+ ANEURALNETWORKS_NUMBER_DATA_TYPES = 11
+};
+
+/**
+ * Operation types.
+ *
+ * [TODO: Make sure these are compatible with TensorFlow Lite.]
+ */
+enum {
+ ANEURALNETWORKS_AVERAGE_POOL = 0,
+ ANEURALNETWORKS_CONCATENATION = 1,
+ ANEURALNETWORKS_CONV = 2,
+ ANEURALNETWORKS_DEPTHWISE_CONV = 3,
+ ANEURALNETWORKS_MAX_POOL = 4,
+ ANEURALNETWORKS_L2_POOL = 5,
+ ANEURALNETWORKS_DEPTH_TO_SPACE = 6,
+ ANEURALNETWORKS_SPACE_TO_DEPTH = 7,
+ ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 8,
+ ANEURALNETWORKS_SOFTMAX = 9,
+ ANEURALNETWORKS_RESHAPE = 10,
+ ANEURALNETWORKS_SPLIT = 11,
+ ANEURALNETWORKS_FAKE_QUANT = 12,
+ ANEURALNETWORKS_ADD = 13,
+ ANEURALNETWORKS_FULLY_CONNECTED = 14,
+ ANEURALNETWORKS_CAST = 15,
+ ANEURALNETWORKS_MUL = 16,
+ ANEURALNETWORKS_L2_NORMALIZATION = 17,
+ ANEURALNETWORKS_LOGISTIC = 18,
+ ANEURALNETWORKS_RELU = 19,
+ ANEURALNETWORKS_RELU6 = 20,
+ ANEURALNETWORKS_RELU1 = 21,
+ ANEURALNETWORKS_TANH = 22,
+ ANEURALNETWORKS_DEQUANTIZE = 23,
+ ANEURALNETWORKS_FLOOR = 24,
+ ANEURALNETWORKS_GATHER = 25,
+ ANEURALNETWORKS_RESIZE_BILINEAR = 26,
+ ANEURALNETWORKS_LSH_PROJECTION = 27,
+ ANEURALNETWORKS_LSTM = 28,
+ ANEURALNETWORKS_SVDF = 29,
+ ANEURALNETWORKS_RNN = 30,
+ ANEURALNETWORKS_N_GRAM = 31,
+ ANEURALNETWORKS_LOOKUP = 32,
+
+ ANEURALNETWORKS_NUMBER_OPERATION_TYPES = 33
+};
+
+/**
+ * Baseline models.
+ */
+enum {
+ ANEURALNETWORKS_INCEPTION_SMALL_20_20 = 0,
+ ANEURALNETWORKS_INCEPTION_LARGE_20_20 = 1,
+ ANEURALNETWORKS_MOBILE_NETS_100_100 = 2,
+
+ ANEURALNETWORKS_NUMBER_BASELINE_MODELS = 3
+};
+
+/**
+ * Request execution preferences.
+ */
+enum {
+ /**
+ * Prefer executing the request in a way that minimizes battery drain.
+ * This is desirable for requests that will be executed often.
+ */
+ ANEURALNETWORKS_PREFER_LOW_POWER = 0,
+ /**
+ * Prefer returning a single answer as fast as possible, even if this causes
+ * more power consumption.
+ */
+ ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
+ /**
+ * Prefer maximizing the throughput of successive frames, for example when
+ * processing successive frames coming from the camera.
+ */
+ ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
+
+ ANEURALNETWORKS_NUMBER_PREFERENCES = 3
+};
+
+/**
+ * Result codes.
+ */
+enum {
+ ANEURALNETWORKS_NO_ERROR = 0,
+ ANEURALNETWORKS_OUT_OF_MEMORY = 1,
+ ANEURALNETWORKS_INCOMPLETE = 2,
+ ANEURALNETWORKS_UNEXPECTED_NULL = 3,
+ ANEURALNETWORKS_BAD_DATA = 4,
+ ANEURALNETWORKS_OP_FAILED = 5,
+ ANEURALNETWORKS_NOT_IMPLEMENTED = 6 // TODO remove
+};
+
+// The maximum number of operands and operations that a model may have.
+const uint32_t MAX_NUMBER_OF_OPERANDS = 0xFFFFFFFE;
+const uint32_t MAX_NUMBER_OF_OPERATIONS = 0xFFFFFFFE;
+
+// TODO use real declaration
+typedef struct AHardwareBuffer AHardwareBuffer;
+
+/**
+ * ANeuralNetworksRequest is an opaque type that can be used to apply a machine
+ * learning model to a set of inputs.
+ *
+ * <p>To use:<ul>
+ * <li>Create a new request instance by calling the
+ * {@link ANeuralNetworksRequest_create} function.</li>
+ * <li>Associate data to the model inputs with
+ * {@link ANeuralNetworksRequest_setInput}.</li>
+ * <li>Associate output buffers to the model outputs with
+ * {@link ANeuralNetworksRequest_setOutput}.</li>
+ * <li>Apply the model with {@link ANeuralNetworksRequest_startCompute}.</li>
+ * <li>Wait for the request to complete with
+ *     {@link ANeuralNetworksEvent_wait}.</li>
+ * <li>Repeat the previous steps as often as needed.</li>
+ * <li>Destroy the request with
+ *     {@link ANeuralNetworksRequest_free}.</li></ul></p>
+ *
+ * <p>A request can be reused by simply modifying the content of the input
+ * buffers and restarting the computation. It's also valid to call
+ * ANeuralNetworksRequest_setInput or ANeuralNetworksRequest_setOutput before
+ * restarting the request, as long as only the address of the buffer
+ * changes.</p>
+ *
+ * <p>The functions that manipulate requests are thread safe.</p>
+ * [TODO: We could have it that it's the responsibility of the application to
+ * ensure that no two threads manipulate the same request concurrently. Internal
+ * structures not specific to a request would always be protected.]
+ */
+typedef struct ANeuralNetworksRequest ANeuralNetworksRequest;
+
+/**
+ * ANeuralNetworksModel is an opaque type that contains a description of the
+ * mathematical operations that constitute the model.
+ *
+ * <p>The model will be built by calling<ul>
+ * <li>{@link ANeuralNetworksModel_create},</li>
+ * <li>{@link ANeuralNetworksModel_addOperation},</li>
+ * <li>{@link ANeuralNetworksModel_addOperand},</li>
+ * </ul>
+ *
+ * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
+ */
+typedef struct ANeuralNetworksModel ANeuralNetworksModel;
+
+typedef struct ANeuralNetworksIntList {
+ uint32_t count;
+ const uint32_t* data;
+} ANeuralNetworksIntList;
+
+/**
+ * ANeuralNetworksOperandType describes the type of an operand.
+ * This structure is used to describe both scalars and tensors.
+ */
+typedef struct ANeuralNetworksOperandType {
+    // The data type, e.g. ANEURALNETWORKS_INT8.
+ uint32_t type;
+ // Count and size of each dimension. The count should be 0 for scalars.
+ ANeuralNetworksIntList dimensions;
+ /* These two fields are only used for quantized tensors.
+ * They should be zero for scalars and non-fixed point tensors.
+ * The dequantized value of each entry is (value - offset) * scale.
+ * TODO: revisit once we have a final representation for quantization.
+ */
+ float scale;
+ float offset;
+} ANeuralNetworksOperandType;
+
+/**
+ * ANeuralNetworksEvent is an opaque type that represents an event
+ * that will be signaled once a request completes.
+ */
+typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
+
+typedef uint32_t ANeuralNetworksOperationType;
+
+/**
+ * Initializes the machine learning runtime.
+ *
+ * This should be called before any other ANeuralNetworks functions.
+ * This function may start worker threads, clean up part of the
+ * cache, and query the capabilities of the drivers.
+ *
+ * As the initialization may take some time, you may want to call
+ * this function outside of the initialization path of your application,
+ * so that your application starts quickly. [TODO verify the startup cost]
+ *
+ * Your application should call {@link ANeuralNetworksShutdown} to tear
+ * down the runtime.
+ *
+ * It is safe for a process to call this function multiple times.
+ * The first call performs the initialization. Successive calls increase
+ * an internal reference count. An equivalent number of calls to
+ * ANeuralNetworksShutdown must be performed for the runtime to be
+ * destroyed. This enables libraries to safely call Initialize and Shutdown.
+ *
+ * This function is thread safe.
+ *
+ * @return NO_ERROR if successful, else [?]
+ */
+int ANeuralNetworksInitialize();
+
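+/*
+ * Illustrative sketch of the reference counting described above, assuming
+ * a hypothetical library that wraps the runtime. The runtime is destroyed
+ * only when every Initialize has been matched by a Shutdown.
+ *
+ *   void MyMlLibrary_startup() { ANeuralNetworksInitialize(); }
+ *   void MyMlLibrary_teardown() { ANeuralNetworksShutdown(); }
+ */
+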
+/**
+ * Destroys the machine learning runtime.
+ *
+ * This function frees any resource used by the runtime. It will wait
+ * until in-flight requests have completed and will prevent new ones
+ * from being started with {@link ANeuralNetworksRequest_startCompute}.
+ *
+ * Threads blocked on {@link ANeuralNetworksEvent_wait} calls will be
+ * released before this function terminates.
+ *
+ * See {@link ANeuralNetworksInitialize} for details on how multiple calls
+ * to Initialize and Shutdown work.
+ *
+ * This function is thread safe.
+ *
+ * [TODO It's possible that the Initialize and Shutdown calls don't need to
+ * affect the models created by the ANeuralNetworksModel_* APIs. If so,
+ * we may want to modify the name of this API and specify it here.]
+ */
+void ANeuralNetworksShutdown();
+
+/**
+ * Create an empty {@link ANeuralNetworksModel}.
+ *
+ * <p>This only creates the object. Computation is performed once
+ * {@link ANeuralNetworksRequest_startCompute} is invoked.
+ *
+ * The model should be constructed with calls to
+ * {@link ANeuralNetworksModel_addOperation} and
+ * {@link ANeuralNetworksModel_addOperand}.
+ *
+ * <p>{@link ANeuralNetworksModel_free} should be called once the model
+ * is no longer needed.</p>
+ *
+ * This function is thread safe.
+ *
+ * @param model The {@link ANeuralNetworksModel} to be created.
+ * Set to NULL if unsuccessful.
+ *
+ * @return NO_ERROR if successful, [?] otherwise.
+ */
+int ANeuralNetworksModel_create(ANeuralNetworksModel** model);
+
+int ANeuralNetworksModel_createBaselineModel(ANeuralNetworksModel** model, uint32_t modelId);
+
+/**
+ * Destroy a model.
+ *
+ * The application is responsible for making sure that no other thread
+ * uses the model at the same time.
+ *
+ * @param model The model to be destroyed. Passing NULL is acceptable and
+ * results in no operation.
+ */
+void ANeuralNetworksModel_free(ANeuralNetworksModel* model);
+
+/**
+ * Add an operand to a model.
+ *
+ * The order in which the operands are added is important. The first one added
+ * to a model will have the index value 0, the second 1, etc. These indexes are
+ * used as operand identifiers in {@link ANeuralNetworksModel_addOperation},
+ * {@link ANeuralNetworksRequest_setInput}, {@link
+ * ANeuralNetworksRequest_setOutput}, and {@link
+ * ANeuralNetworksRequest_setOperandValue}.
+ *
+ * To build a model that can accommodate inputs of various sizes, as you may want
+ * to do for a CNN, set the size of the dimensions that will vary at run time to
+ * 0. These dimensions will have to be set when the application calls
+ * {@link ANeuralNetworksRequest_setInput}.
+ *
+ * The application is responsible for making sure that no other thread
+ * uses the model at the same time.
+ *
+ * A model can't be modified once a request has been created for it by
+ * {@link ANeuralNetworksRequest_create}.
+ *
+ * @param model The model to be modified.
+ * @param type The {@link ANeuralNetworksOperandType} that describes the shape
+ * of the operand.
+ *
+ * @return NO_ERROR if successful, [?] otherwise.
+ */
+int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
+ const ANeuralNetworksOperandType* type);
+
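+/*
+ * Illustrative sketch of an operand with a run-time-sized dimension: the
+ * first dimension is declared as 0 and must be resolved when the input is
+ * set on a request.
+ *
+ *   uint32_t dims[] = {0, 4};  // batch size resolved at request time
+ *   ANeuralNetworksOperandType batchedType = {
+ *       .type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ *       .dimensions = {.count = 2, .data = dims}};
+ *   ANeuralNetworksModel_addOperand(model, &batchedType);
+ */
+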
+/**
+ * Sets an operand to a constant value.
+ *
+ * This value can't be changed when a request is executed.
+ *
+ * A model can't be modified once a request has been created for it by
+ * {@link ANeuralNetworksRequest_create}.
+ */
+int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
+ const void* buffer, size_t length);
+
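+/*
+ * Illustrative sketch: making operand 3 a constant scalar. The index and
+ * value are assumptions; the buffer length must match the size of the
+ * operand's type.
+ *
+ *   float bias = 0.5f;
+ *   ANeuralNetworksModel_setOperandValue(model, 3, &bias, sizeof(bias));
+ */
+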
+/**
+ * Add an operation to a model.
+ *
+ * @param model The model to be modified.
+ * @param type The type of the operation.
+ * @param inputs An array of indexes, each identifying an operand.
+ * @param outputs An array of indexes, each identifying an operand.
+ * [TODO: Make sure these are compatible with TensorFlow Lite.]
+ *
+ * The operands specified by inputs and outputs must have been
+ * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
+ *
+ * The application is responsible for making sure that no other thread
+ * uses the model at the same time.
+ *
+ * A model can't be modified once a request has been created for it by
+ * {@link ANeuralNetworksRequest_create}.
+ *
+ * @return NO_ERROR if successful, [?] otherwise.
+ */
+int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
+ ANeuralNetworksOperationType type,
+ ANeuralNetworksIntList* inputs,
+ ANeuralNetworksIntList* outputs);
+
+/** Adds a submodel.
+ *
+ * [TODO: This makes a copy, so modifying the submodel afterwards does not
+ * change this model. What about the data?]
+ *
+ * The operands specified by inputs and outputs must have been
+ * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
+ *
+ * A model can't be modified once a request has been created for it by
+ * {@link ANeuralNetworksRequest_create}.
+ */
+int ANeuralNetworksModel_addSubModel(ANeuralNetworksModel* model,
+ const ANeuralNetworksModel* submodel,
+ ANeuralNetworksIntList* inputs,
+ ANeuralNetworksIntList* outputs);
+
+/**
+ * Specifies which operands will be the model's inputs and outputs.
+ *
+ * TODO: Can an operand be used for both input and output?
+ *
+ * @param model The model to be modified.
+ * @param inputs An array of indexes identifying the input operands.
+ * @param outputs An array of indexes identifying the output operands.
+ *
+ * The operands specified by inputs and outputs must have been
+ * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
+ *
+ * A model can't be modified once a request has been created for it by
+ * {@link ANeuralNetworksRequest_create}.
+ */
+int ANeuralNetworksModel_setInputsAndOutputs(ANeuralNetworksModel* model,
+ ANeuralNetworksIntList* inputs,
+ ANeuralNetworksIntList* outputs);
+
+/**
+ * If the model is one of the baseline models, set its ID so that it can be
+ * easily recognized by the drivers.
+ *
+ * A model can't be modified once a request has been created for it by
+ * {@link ANeuralNetworksRequest_create}.
+ */
+int ANeuralNetworksModel_setBaselineId(ANeuralNetworksModel* model, uint32_t baseLineId);
+
+/**
+ * Create a {@link ANeuralNetworksRequest} to apply the given model.
+ * This only creates the object. Computation is only performed once
+ * {@link ANeuralNetworksRequest_startCompute} is invoked.
+ *
+ * <p>The provided model must outlive the request.</p>
+ *
+ * This function is thread safe.
+ *
+ * @param model The {@link ANeuralNetworksModel} to be evaluated.
+ * @param request The newly created object or NULL if unsuccessful.
+ *
+ * @return NO_ERROR if successful, BAD_DATA if the model is invalid.
+ */
+int ANeuralNetworksRequest_create(ANeuralNetworksModel* model, ANeuralNetworksRequest** request);
+
+/**
+ * Destroy a request.
+ *
+ * <p>If called on a request for which
+ * {@link ANeuralNetworksRequest_startCompute} has been called, the
+ * function will return immediately but will mark the request to be deleted
+ * once the computation completes. The related {@link ANeuralNetworksEvent}
+ * will be signaled, but {@link ANeuralNetworksEvent_wait} will return
+ * ERROR_DELETED.
+ *
+ * This function is thread safe.
+ *
+ * @param request The request to be destroyed. Passing NULL is acceptable and
+ * results in no operation.
+ */
+void ANeuralNetworksRequest_free(ANeuralNetworksRequest* request);
+
+/**
+ * Sets the execution preference.
+ *
+ * <p>Provides guidance to the runtime when trade-offs are possible.</p>
+ *
+ * This function is thread safe.
+ *
+ * @param request The request to be modified.
+ * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER},
+ * {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or
+ * {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}.
+ *
+ * @return NO_ERROR if successful.
+ */
+int ANeuralNetworksRequest_setPreference(ANeuralNetworksRequest* request, uint32_t preference);
+
+/**
+ * Associate a user buffer with an input of the model of the
+ * {@link ANeuralNetworksRequest}.
+ *
+ * <p>The provided buffer must outlive the request.</p>
+ *
+ * This function is thread safe.
+ *
+ * @param request The request to be modified.
+ * @param index The index of the model operand we're associating the input to.
+ * @param type The type of the operand. This is useful if the model did not
+ * fully specify the operand. If specified in the model, type should be NULL or
+ * have the same value as specified in the model.
+ * [TODO: We know the dimensions may change. Anything else? Base
+ * type?]
+ * @param buffer The buffer containing the data.
+ * @param length The length in bytes of the buffer.
+ *
+ * @return NO_ERROR if successful, BAD_DATA if the index is not recognized
+ * or the buffer is too small for the input.
+ */
+int ANeuralNetworksRequest_setInput(ANeuralNetworksRequest* request, int32_t index,
+ const ANeuralNetworksOperandType* type, const void* buffer,
+ size_t length);
+
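+/*
+ * Illustrative sketch of supplying a fully specified type at input-setting
+ * time, assuming the model declared input 0 with dimensions {0, 4} and
+ * that data points to the input buffer:
+ *
+ *   uint32_t resolved[] = {8, 4};  // batch size now known
+ *   ANeuralNetworksOperandType resolvedType = {
+ *       .type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ *       .dimensions = {.count = 2, .data = resolved}};
+ *   ANeuralNetworksRequest_setInput(request, 0, &resolvedType, data,
+ *                                   8 * 4 * sizeof(float));
+ */
+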
+/**
+ * Associate a hardware buffer with an input of the model of the
+ * {@link ANeuralNetworksRequest}.
+ *
+ * <p>The provided buffer must outlive the request.</p>
+ *
+ * This function is thread safe.
+ *
+ * @param request The request to be modified.
+ * @param index The index of the model operand we're associating the input to.
+ * @param type The type of the operand. This is useful if the model did not
+ * fully specify the operand. If specified in the model, type should be NULL or
+ * have the same value as specified in the model.
+ * [TODO: We know the dimensions may change. Anything else? Base
+ * type?]
+ * @param buffer The buffer containing the data.
+ * [TODO: Would it be useful to have a rect param?]
+ *
+ * @return NO_ERROR if successful, BAD_DATA if the index is not recognized
+ * or the buffer is too small for the input.
+ */
+int ANeuralNetworksRequest_setInputFromHardwareBuffer(ANeuralNetworksRequest* request,
+ int32_t index,
+ const ANeuralNetworksOperandType* type,
+ const AHardwareBuffer* buffer);
+
+/**
+ * Associate a user buffer with an output of the model of the
+ * {@link ANeuralNetworksRequest}.
+ *
+ * <p>The provided buffer must outlive the request.</p>
+ *
+ * This function is thread safe.
+ *
+ * @param request The request to be modified.
+ * @param index The index of the model operand we're associating the output to.
+ * @param type The type of the operand. This is useful if the model did not
+ * fully specify the operand. If specified in the model, type should be NULL or
+ * have the same value as specified in the model.
+ * [TODO: We know the dimensions may change. Anything else? Base
+ * type?]
+ * @param buffer The buffer where the data will be written.
+ * @param length The length in bytes of the buffer.
+ *
+ * @return NO_ERROR if successful, BAD_DATA if the index is not recognized
+ * or the buffer is too small for the output.
+ */
+int ANeuralNetworksRequest_setOutput(ANeuralNetworksRequest* request, int32_t index,
+ const ANeuralNetworksOperandType* type, void* buffer,
+ size_t length);
+
+/**
+ * Associate a hardware buffer with an output of the model of the
+ * {@link ANeuralNetworksRequest}.
+ *
+ * <p>The provided buffer must outlive the request.</p>
+ *
+ * @param request The request to be modified.
+ * @param index The index of the model operand we're associating the output to.
+ * @param type The type of the operand. This is useful if the model did not
+ * fully specify the operand. If specified in the model, type should be NULL or
+ * have the same value as specified in the model.
+ * [TODO: We know the dimensions may change. Anything else? Base
+ * type?]
+ * @param buffer The buffer where the data will be written.
+ * [TODO: Would it be useful to have a rect param?]
+ *
+ * @return NO_ERROR if successful, BAD_DATA if the index is not recognized
+ * or the buffer is too small for the output.
+ */
+int ANeuralNetworksRequest_setOutputFromHardwareBuffer(ANeuralNetworksRequest* request,
+ int32_t index,
+ const ANeuralNetworksOperandType* type,
+ const AHardwareBuffer* buffer);
+
+/**
+ * Queue the request for execution.
+ *
+ * <p>Puts the request in a queue for execution. Once the model has been
+ * applied and the outputs are ready to be consumed, the returned event will be
+ * signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that event.
+ * </p>
+ *
+ * Multiple requests can be queued and executed concurrently. The runtime makes
+ * no guarantee on the ordering of the completion of the requests. If it's
+ * important to the application, the application should enforce the ordering by
+ * using the returned events.
+ *
+ * {@link ANeuralNetworksEvent_wait} must be called to reclaim the resources
+ * used by the event.
+ *
+ * This function is thread safe.
+ *
+ * @param request The request to be executed.
+ * @param event The event that will be signaled on completion.
+ * [TODO define the functions to create/delete events]
+ *
+ * @return NO_ERROR if successful, BAD_DATA if event is NULL.
+ */
+int ANeuralNetworksRequest_startCompute(ANeuralNetworksRequest* request,
+ ANeuralNetworksEvent** event);
+
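+/*
+ * Illustrative sketch of enforcing ordering between two queued requests
+ * (request1 and request2 are assumptions) by waiting on each returned
+ * event before consuming the matching outputs:
+ *
+ *   ANeuralNetworksEvent* e1 = NULL;
+ *   ANeuralNetworksEvent* e2 = NULL;
+ *   ANeuralNetworksRequest_startCompute(request1, &e1);
+ *   ANeuralNetworksRequest_startCompute(request2, &e2);
+ *   ANeuralNetworksEvent_wait(e1);  // request1's outputs are now valid
+ *   ANeuralNetworksEvent_wait(e2);  // request2's outputs are now valid
+ *   ANeuralNetworksEvent_free(e1);
+ *   ANeuralNetworksEvent_free(e2);
+ */
+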
+/**
+ * Waits until the request completes.
+ *
+ * More than one thread can wait on an event. When the request completes,
+ * all threads will be released.
+ * [TODO Should we free just one to enable thread pools?]
+ *
+ * This function is thread safe.
+ *
+ * @return NO_ERROR if the request completed normally.
+ */
+int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event);
+
+/**
+ * Destroys the event.
+ *
+ * TODO: Figure out lifetime management if multiple threads can wait on an
+ * event.
+ */
+void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event);
+
+__END_DECLS
+
+#endif // __ANDROID_API__ >= 27
+
+#endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
diff --git a/runtime/include/NeuralNetworksWrapper.h b/runtime/include/NeuralNetworksWrapper.h
new file mode 100644
index 0000000..db6e657
--- /dev/null
+++ b/runtime/include/NeuralNetworksWrapper.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Provides C++ classes to more easily use the Neural Networks API.
+
+#ifndef ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_H
+#define ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_H
+
+#include "NeuralNetworks.h"
+
+#include <vector>
+
+namespace android {
+namespace nn {
+namespace wrapper {
+
+enum class Type {
+ FLOAT16 = ANEURALNETWORKS_FLOAT16,
+ FLOAT32 = ANEURALNETWORKS_FLOAT32,
+ INT8 = ANEURALNETWORKS_INT8,
+ UINT8 = ANEURALNETWORKS_UINT8,
+ INT16 = ANEURALNETWORKS_INT16,
+ UINT16 = ANEURALNETWORKS_UINT16,
+ INT32 = ANEURALNETWORKS_INT32,
+ UINT32 = ANEURALNETWORKS_UINT32,
+ TENSOR_FLOAT16 = ANEURALNETWORKS_TENSOR_FLOAT16,
+ TENSOR_FLOAT32 = ANEURALNETWORKS_TENSOR_FLOAT32,
+ TENSOR_SYMMETRICAL_QUANT8 = ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8,
+};
+
+enum class ExecutePreference {
+ PREFER_LOW_POWER = ANEURALNETWORKS_PREFER_LOW_POWER,
+ PREFER_FAST_SINGLE_ANSWER = ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER,
+ PREFER_SUSTAINED_SPEED = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED
+};
+
+enum class Result {
+ NO_ERROR = ANEURALNETWORKS_NO_ERROR,
+ OUT_OF_MEMORY = ANEURALNETWORKS_OUT_OF_MEMORY,
+ INCOMPLETE = ANEURALNETWORKS_INCOMPLETE,
+ UNEXPECTED_NULL = ANEURALNETWORKS_UNEXPECTED_NULL,
+ BAD_DATA = ANEURALNETWORKS_BAD_DATA,
+};
+
+struct OperandType {
+ ANeuralNetworksOperandType operandType;
+ std::vector<uint32_t> dimensions;
+
+ OperandType(Type type, const std::vector<uint32_t>& d) : dimensions(d) {
+ operandType.type = static_cast<uint32_t>(type);
+ operandType.dimensions.count = static_cast<uint32_t>(dimensions.size());
+ operandType.dimensions.data = dimensions.data();
+ }
+};
+
+inline Result Initialize() {
+ return static_cast<Result>(ANeuralNetworksInitialize());
+}
+
+inline void Shutdown() {
+ ANeuralNetworksShutdown();
+}
+
+class Model {
+public:
+ Model() {
+ if (ANeuralNetworksModel_create(&mModel) != ANEURALNETWORKS_NO_ERROR) {
+ mValid = false;
+ }
+ }
+ ~Model() { ANeuralNetworksModel_free(mModel); }
+
+ uint32_t addOperand(const OperandType* type) {
+ if (ANeuralNetworksModel_addOperand(mModel, &(type->operandType)) !=
+ ANEURALNETWORKS_NO_ERROR) {
+ mValid = false;
+ }
+ return mNextOperandId++;
+ }
+
+ void setOperandValue(uint32_t index, const void* buffer, size_t length) {
+ if (ANeuralNetworksModel_setOperandValue(mModel, index, buffer, length) !=
+ ANEURALNETWORKS_NO_ERROR) {
+ mValid = false;
+ }
+ }
+
+ void addOperation(ANeuralNetworksOperationType type, const std::vector<uint32_t>& inputs,
+ const std::vector<uint32_t>& outputs) {
+ ANeuralNetworksIntList in, out;
+ Set(&in, inputs);
+ Set(&out, outputs);
+ if (ANeuralNetworksModel_addOperation(mModel, type, &in, &out) !=
+ ANEURALNETWORKS_NO_ERROR) {
+ mValid = false;
+ }
+ }
+
+ void setInputsAndOutputs(const std::vector<uint32_t>& inputs,
+ const std::vector<uint32_t>& outputs) {
+ ANeuralNetworksIntList in, out;
+ Set(&in, inputs);
+ Set(&out, outputs);
+ if (ANeuralNetworksModel_setInputsAndOutputs(mModel, &in, &out) !=
+ ANEURALNETWORKS_NO_ERROR) {
+ mValid = false;
+ }
+ }
+ ANeuralNetworksModel* getHandle() const { return mModel; }
+ bool isValid() const { return mValid; }
+ static Model* createBaselineModel(uint32_t modelId) {
+ Model* model = new Model();
+ // Free the empty model created by the constructor before replacing it.
+ ANeuralNetworksModel_free(model->mModel);
+ model->mModel = nullptr;
+ if (ANeuralNetworksModel_createBaselineModel(&model->mModel, modelId) !=
+ ANEURALNETWORKS_NO_ERROR) {
+ delete model;
+ model = nullptr;
+ }
+ return model;
+ }
+
+private:
+ /**
+ * WARNING: the list won't be valid once vec is destroyed or modified.
+ */
+ void Set(ANeuralNetworksIntList* list, const std::vector<uint32_t>& vec) {
+ list->count = static_cast<uint32_t>(vec.size());
+ list->data = vec.data();
+ }
+
+ ANeuralNetworksModel* mModel = nullptr;
+ // We keep track of the operand ID as a convenience to the caller.
+ uint32_t mNextOperandId = 0;
+ bool mValid = true;
+};
+
+class Event {
+public:
+ ~Event() { ANeuralNetworksEvent_free(mEvent); }
+ Result wait() { return static_cast<Result>(ANeuralNetworksEvent_wait(mEvent)); }
+ void set(ANeuralNetworksEvent* newEvent) {
+ ANeuralNetworksEvent_free(mEvent);
+ mEvent = newEvent;
+ }
+
+private:
+ ANeuralNetworksEvent* mEvent = nullptr;
+};
+
+class Request {
+public:
+ Request(const Model* model) {
+ int result = ANeuralNetworksRequest_create(model->getHandle(), &mRequest);
+ if (result != 0) {
+ // TODO Handle the error
+ }
+ }
+
+ ~Request() { ANeuralNetworksRequest_free(mRequest); }
+
+ Result setPreference(ExecutePreference preference) {
+ return static_cast<Result>(ANeuralNetworksRequest_setPreference(
+ mRequest, static_cast<uint32_t>(preference)));
+ }
+
+ Result setInput(uint32_t index, const void* buffer, size_t length,
+ const ANeuralNetworksOperandType* type = nullptr) {
+ return static_cast<Result>(
+ ANeuralNetworksRequest_setInput(mRequest, index, type, buffer, length));
+ }
+
+ Result setInputFromHardwareBuffer(uint32_t index, const AHardwareBuffer* buffer,
+ const ANeuralNetworksOperandType* type) {
+ return static_cast<Result>(ANeuralNetworksRequest_setInputFromHardwareBuffer(
+ mRequest, index, type, buffer));
+ }
+
+ Result setOutput(uint32_t index, void* buffer, size_t length,
+ const ANeuralNetworksOperandType* type = nullptr) {
+ return static_cast<Result>(
+ ANeuralNetworksRequest_setOutput(mRequest, index, type, buffer, length));
+ }
+
+ Result setOutputFromHardwareBuffer(uint32_t index, const AHardwareBuffer* buffer,
+ const ANeuralNetworksOperandType* type = nullptr) {
+ return static_cast<Result>(ANeuralNetworksRequest_setOutputFromHardwareBuffer(
+ mRequest, index, type, buffer));
+ }
+
+ Result startCompute(Event* event) {
+ ANeuralNetworksEvent* ev = nullptr;
+ Result result = static_cast<Result>(ANeuralNetworksRequest_startCompute(mRequest, &ev));
+ event->set(ev);
+ return result;
+ }
+
+ Result compute() {
+ ANeuralNetworksEvent* event = nullptr;
+ Result result = static_cast<Result>(ANeuralNetworksRequest_startCompute(mRequest, &event));
+ if (result != Result::NO_ERROR) {
+ return result;
+ }
+ // TODO: how to manage the lifetime of events when there are multiple
+ // waiters is not clear.
+ result = static_cast<Result>(ANeuralNetworksEvent_wait(event));
+ ANeuralNetworksEvent_free(event);
+ return result;
+ }
+
+private:
+ ANeuralNetworksRequest* mRequest = nullptr;
+};
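+
+// Illustrative sketch of using the wrapper classes together; error handling
+// is elided and the in/in2/out buffers are assumptions. This mirrors the
+// C API flow: build a Model, then run it through a Request.
+//
+//   Model model;
+//   OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
+//   uint32_t a = model.addOperand(&matrixType);
+//   uint32_t b = model.addOperand(&matrixType);
+//   uint32_t c = model.addOperand(&matrixType);
+//   model.addOperation(ANEURALNETWORKS_ADD, {a, b}, {c});
+//   model.setInputsAndOutputs({a, b}, {c});
+//   Request request(&model);
+//   request.setInput(0, in, sizeof(in));
+//   request.setInput(1, in2, sizeof(in2));
+//   request.setOutput(0, out, sizeof(out));
+//   request.compute();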
+
+} // namespace wrapper
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_H
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp
new file mode 100644
index 0000000..ac56418
--- /dev/null
+++ b/runtime/test/Android.bp
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_defaults {
+ name: "NeuralNetworksTest_defaults",
+ defaults: ["neuralnetworks_defaults"],
+ host_supported: true,
+
+ srcs: [
+ "TestMain.cpp",
+ "TestTrivialModel.cpp",
+ "TestValidation.cpp",
+ ],
+ static_libs: [
+ "libneuralnetworks_sample_driver",
+ "libneuralnetworks_driver",
+ "libneuralnetworks_common",
+ ],
+ header_libs: [
+ "libneuralnetworks_private_headers",
+ ],
+}
+
+cc_test {
+ name: "NeuralNetworksTest",
+ defaults: ["NeuralNetworksTest_defaults"],
+ shared_libs: [
+ "libneuralnetworks",
+ ],
+}
+
+cc_test {
+ name: "NeuralNetworksTest_static",
+ defaults: ["NeuralNetworksTest_defaults"],
+ static_libs: [
+ "libneuralnetworks",
+ ],
+}
diff --git a/runtime/test/TestMain.cpp b/runtime/test/TestMain.cpp
new file mode 100644
index 0000000..e4e14c0
--- /dev/null
+++ b/runtime/test/TestMain.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Manager.h"
+#include "NeuralNetworksWrapper.h"
+#include "SampleDriver.h"
+#include "Utils.h"
+
+#include <gtest/gtest.h>
+
+using namespace android::nn::wrapper;
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+
+ // Test with the CPU default path
+ int n1 = RUN_ALL_TESTS();
+
+ // Create our own driver to simulate testing through the HAL.
+ // A real Android program would not do this.
+ nnAssert(ANeuralNetworksInitialize() == ANEURALNETWORKS_NO_ERROR);
+ std::shared_ptr<android::nn::SampleDriver> sampleDriver {new android::nn::SampleDriver()};
+ android::nn::DriverManager::get()->registerDriver(sampleDriver);
+
+ // Test a second time, this time through the sample driver.
+ int n2 = RUN_ALL_TESTS();
+ ANeuralNetworksShutdown();
+
+ return n1 != 0 || n2 != 0;
+}
diff --git a/runtime/test/TestTrivialModel.cpp b/runtime/test/TestTrivialModel.cpp
new file mode 100644
index 0000000..401d585
--- /dev/null
+++ b/runtime/test/TestTrivialModel.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NeuralNetworksWrapper.h"
+
+#include <gtest/gtest.h>
+
+using namespace android::nn::wrapper;
+
+namespace {
+
+typedef float Matrix3x4[3][4];
+
+class TrivialTest : public ::testing::Test {
+protected:
+ virtual void SetUp() { ASSERT_EQ(Initialize(), Result::NO_ERROR); }
+ virtual void TearDown() { Shutdown(); }
+
+ const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
+ const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
+ {500.f, 600.f, 700.f, 800.f},
+ {900.f, 1000.f, 1100.f, 1200.f}};
+ const Matrix3x4 matrix3 = {{20.f, 30.f, 40.f, 50.f},
+ {21.f, 22.f, 23.f, 24.f},
+ {31.f, 32.f, 33.f, 34.f}};
+ const Matrix3x4 expected2 = {{101.f, 202.f, 303.f, 404.f},
+ {505.f, 606.f, 707.f, 808.f},
+ {909.f, 1010.f, 1111.f, 1212.f}};
+ const Matrix3x4 expected3 = {{121.f, 232.f, 343.f, 454.f},
+ {526.f, 628.f, 730.f, 832.f},
+ {940.f, 1042.f, 1144.f, 1246.f}};
+ const Matrix3x4 expected3b = {{22.f, 34.f, 46.f, 58.f},
+ {31.f, 34.f, 37.f, 40.f},
+ {49.f, 52.f, 55.f, 58.f}};
+};
+
+// Create a model that can add two tensors using a one-node graph.
+void CreateAddTwoTensorModel(Model* model) {
+ OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
+ auto a = model->addOperand(&matrixType);
+ auto b = model->addOperand(&matrixType);
+ auto c = model->addOperand(&matrixType);
+ model->addOperation(ANEURALNETWORKS_ADD, {a, b}, {c});
+ model->setInputsAndOutputs({a, b}, {c});
+ ASSERT_TRUE(model->isValid());
+}
+
+// Create a model that can add three tensors using a two-node graph,
+// with one tensor set as part of the model.
+void CreateAddThreeTensorModel(Model* model, const Matrix3x4 bias) {
+ OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
+ auto a = model->addOperand(&matrixType);
+ auto b = model->addOperand(&matrixType);
+ auto c = model->addOperand(&matrixType);
+ auto d = model->addOperand(&matrixType);
+ auto e = model->addOperand(&matrixType);
+ model->setOperandValue(e, bias, sizeof(Matrix3x4));
+ model->addOperation(ANEURALNETWORKS_ADD, {a, c}, {b});
+ model->addOperation(ANEURALNETWORKS_ADD, {b, e}, {d});
+ model->setInputsAndOutputs({c, a}, {d});
+ ASSERT_TRUE(model->isValid());
+}
+
+// Check that the values are the same. This works only when dealing with
+// integer values; otherwise we should accept values that are close, if not
+// exact.
+int CompareMatrices(const Matrix3x4 expected, const Matrix3x4 actual) {
+ int errors = 0;
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 4; j++) {
+ if (expected[i][j] != actual[i][j]) {
+ printf("expected[%d][%d] != actual[%d][%d], %f != %f\n", i, j, i, j,
+ static_cast<double>(expected[i][j]), static_cast<double>(actual[i][j]));
+ errors++;
+ }
+ }
+ }
+ return errors;
+}
+
+// TODO convert the CompareMatrices checks to gtest assertions
+TEST_F(TrivialTest, AddTwo) {
+ Model modelAdd2;
+ CreateAddTwoTensorModel(&modelAdd2);
+
+ // Test the one-node model.
+ Matrix3x4 actual;
+ memset(&actual, 0, sizeof(actual));
+ Request request(&modelAdd2);
+ ASSERT_EQ(request.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request.compute(), Result::NO_ERROR);
+ ASSERT_EQ(CompareMatrices(expected2, actual), 0);
+}
+
+TEST_F(TrivialTest, AddThree) {
+ Model modelAdd3;
+ CreateAddThreeTensorModel(&modelAdd3, matrix3);
+
+ // Test the two-node model.
+ Matrix3x4 actual;
+ memset(&actual, 0, sizeof(actual));
+ Request request2(&modelAdd3);
+ ASSERT_EQ(request2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request2.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request2.compute(), Result::NO_ERROR);
+ ASSERT_EQ(CompareMatrices(expected3, actual), 0);
+
+ // Test it a second time to make sure the model is reusable.
+ memset(&actual, 0, sizeof(actual));
+ Request request3(&modelAdd3);
+ ASSERT_EQ(request3.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request3.setInput(1, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request3.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+ ASSERT_EQ(request3.compute(), Result::NO_ERROR);
+ ASSERT_EQ(CompareMatrices(expected3b, actual), 0);
+}
+
+} // end namespace
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
new file mode 100644
index 0000000..c91dd88
--- /dev/null
+++ b/runtime/test/TestValidation.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NeuralNetworks.h"
+
+#include <gtest/gtest.h>
+#include <string>
+
+// This file tests all the validations done by the Neural Networks API.
+
+namespace {
+class ValidationTest : public ::testing::Test {
+protected:
+ virtual void SetUp() { ASSERT_EQ(ANeuralNetworksInitialize(), ANEURALNETWORKS_NO_ERROR); }
+ virtual void TearDown() { ANeuralNetworksShutdown(); }
+};
+
+class ValidationTestModel : public ::testing::Test {
+protected:
+ virtual void SetUp() {
+ ASSERT_EQ(ANeuralNetworksInitialize(), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR);
+ }
+ virtual void TearDown() {
+ ANeuralNetworksModel_free(mModel);
+ ANeuralNetworksShutdown();
+ }
+ ANeuralNetworksModel* mModel = nullptr;
+};
+
+class ValidationTestRequest : public ::testing::Test {
+protected:
+ virtual void SetUp() {
+ ASSERT_EQ(ANeuralNetworksInitialize(), ANEURALNETWORKS_NO_ERROR);
+
+ ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR);
+ uint32_t dimensions[]{1};
+ ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ .dimensions = {.count = 1, .data = dimensions}};
+ ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
+ uint32_t inList[2]{0, 1};
+ uint32_t outList[1]{2};
+ ANeuralNetworksIntList inputs{.count = 2, .data = inList};
+ ANeuralNetworksIntList outputs{.count = 1, .data = outList};
+ ASSERT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_ADD, &inputs, &outputs),
+ ANEURALNETWORKS_NO_ERROR);
+
+ ASSERT_EQ(ANeuralNetworksRequest_create(mModel, &mRequest), ANEURALNETWORKS_NO_ERROR);
+ }
+ virtual void TearDown() {
+ ANeuralNetworksRequest_free(mRequest);
+ ANeuralNetworksModel_free(mModel);
+ ANeuralNetworksShutdown();
+ }
+ ANeuralNetworksModel* mModel = nullptr;
+ ANeuralNetworksRequest* mRequest = nullptr;
+};
+
+TEST_F(ValidationTest, CreateModel) {
+ EXPECT_EQ(ANeuralNetworksModel_create(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+
+ EXPECT_EQ(ANeuralNetworksModel_createBaselineModel(nullptr,
+ ANEURALNETWORKS_INCEPTION_SMALL_20_20),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+
+ ANeuralNetworksModel* model = nullptr;
+ EXPECT_EQ(ANeuralNetworksModel_createBaselineModel(&model,
+ ANEURALNETWORKS_NUMBER_BASELINE_MODELS),
+ ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(ValidationTestModel, AddOperand) {
+ ANeuralNetworksOperandType floatType{.type = ANEURALNETWORKS_FLOAT32,
+ .dimensions = {.count = 0, .data = nullptr}};
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(nullptr, &floatType),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+ // TODO: more types.
+}
+
+TEST_F(ValidationTestModel, SetOperandValue) {
+ ANeuralNetworksOperandType floatType{.type = ANEURALNETWORKS_FLOAT32,
+ .dimensions = {.count = 0, .data = nullptr}};
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);
+
+ char buffer[20];
+ EXPECT_EQ(ANeuralNetworksModel_setOperandValue(nullptr, 0, buffer, sizeof(buffer)),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, nullptr, sizeof(buffer)),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+
+ // This should fail, since buffer is not the size of a float32.
+ EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(buffer)),
+ ANEURALNETWORKS_BAD_DATA);
+
+ // This should fail, as this operand does not exist.
+ EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 1, buffer, 4), ANEURALNETWORKS_BAD_DATA);
+
+ // TODO lots of validation of type
+ // EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer,
+ // sizeof(buffer)), ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, AddOperation) {
+ ANeuralNetworksIntList inputs{};
+ ANeuralNetworksIntList outputs{};
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(nullptr, ANEURALNETWORKS_AVERAGE_POOL, &inputs,
+ &outputs),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL, nullptr,
+ &outputs),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL, &inputs,
+ nullptr),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ // EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel,
+ // ANEURALNETWORKS_AVERAGE_POOL, &inputs,
+ // &outputs),
+ // ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, AddSubModel) {
+ ANeuralNetworksIntList inputs{};
+ ANeuralNetworksIntList outputs{};
+ ANeuralNetworksModel* submodel = nullptr;
+ EXPECT_EQ(ANeuralNetworksModel_addSubModel(nullptr, submodel, &inputs, &outputs),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_addSubModel(mModel, nullptr, &inputs, &outputs),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ // EXPECT_EQ(ANeuralNetworksModel_addSubModel(mModel, &submodel,
+ // &inputs, &outputs),
+ // ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, SetInputsAndOutputs) {
+ ANeuralNetworksIntList inputs{};
+ ANeuralNetworksIntList outputs{};
+ EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(nullptr, &inputs, &outputs),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(mModel, nullptr, &outputs),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(mModel, &inputs, nullptr),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, SetBaselineId) {
+ EXPECT_EQ(ANeuralNetworksModel_setBaselineId(nullptr, ANEURALNETWORKS_INCEPTION_SMALL_20_20),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksModel_setBaselineId(mModel, ANEURALNETWORKS_NUMBER_BASELINE_MODELS),
+ ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(ValidationTestModel, CreateRequest) {
+ ANeuralNetworksRequest* request = nullptr;
+ EXPECT_EQ(ANeuralNetworksRequest_create(nullptr, &request), ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksRequest_create(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+ // EXPECT_EQ(ANeuralNetworksRequest_create(mModel, ANeuralNetworksRequest *
+ // *request),
+ // ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestRequest, SetPreference) {
+ EXPECT_EQ(ANeuralNetworksRequest_setPreference(nullptr, ANEURALNETWORKS_PREFER_LOW_POWER),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+
+ EXPECT_EQ(ANeuralNetworksRequest_setPreference(mRequest, ANEURALNETWORKS_NUMBER_PREFERENCES),
+ ANEURALNETWORKS_BAD_DATA);
+}
+
+#if 0
+// TODO do more..
+TEST_F(ValidationTestRequest, SetInput) {
+ EXPECT_EQ(ANeuralNetworksRequest_setInput(ANeuralNetworksRequest * request, int32_t index,
+ const ANeuralNetworksOperandType* type,
+ const void* buffer, size_t length),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestRequest, SetInputFromHardwareBuffer) {
+ EXPECT_EQ(ANeuralNetworksRequest_setInputFromHardwareBuffer(ANeuralNetworksRequest * request,
+ int32_t index,
+ const ANeuralNetworksOperandType*
+ type,
+ const AHardwareBuffer* buffer),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestRequest, SetOutput) {
+ EXPECT_EQ(ANeuralNetworksRequest_setOutput(ANeuralNetworksRequest * request, int32_t index,
+ const ANeuralNetworksOperandType* type, void* buffer,
+ size_t length),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestRequest, SetOutputFromHardwareBuffer) {
+ EXPECT_EQ(ANeuralNetworksRequest_setOutputFromHardwareBuffer(ANeuralNetworksRequest * request,
+ int32_t index,
+ const ANeuralNetworksOperandType*
+ type,
+ const AHardwareBuffer* buffer),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestRequest, StartCompute) {
+ EXPECT_EQ(ANeuralNetworksRequest_startCompute(ANeuralNetworksRequest * request,
+ ANeuralNetworksEvent * *event),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestEvent, Wait) {
+ EXPECT_EQ(ANeuralNetworksEvent_wait(ANeuralNetworksEvent * event),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestEvent, Free) {
+ EXPECT_EQ(ANeuralNetworksEvent_free(ANeuralNetworksEvent * event),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+#endif
+
+} // namespace