Update documentation. Remove IntList.
As requested by the NDK council, updated some documentation and removed
IntList. Other issues will be addressed in other CLs.
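
The ANeuralNetworksIntList struct is gone from the C API; callers now pass
an explicit element count together with a plain uint32_t array. A minimal
before/after sketch of the call-site change (illustrative only; the model
handle and operand indices below are made-up values):

Before:
    uint32_t dims[] = {1};
    ANeuralNetworksOperandType tensorType = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            .dimensions = {.count = 1, .data = dims}};
    uint32_t inList[] = {0, 1};
    uint32_t outList[] = {2};
    ANeuralNetworksIntList inputs = {.count = 2, .data = inList};
    ANeuralNetworksIntList outputs = {.count = 1, .data = outList};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, &inputs, &outputs);

After:
    uint32_t dims[] = {1};
    ANeuralNetworksOperandType tensorType = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            .dimensionCount = 1,
            .dimensions = dims};
    uint32_t inList[] = {0, 1};
    uint32_t outList[] = {2};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 2, inList, 1, outList);
    ANeuralNetworksModel_setInputsAndOutputs(model, 2, inList, 1, outList);
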
Bug: 63905942
Test: Compiled and ran the unit tests.
Change-Id: Iff6898737696daa6bbf41643b07d0a4508af4c0d
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index 5f1cc35..e8531be 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -61,14 +61,14 @@
if (newType == nullptr) {
locationAndDimension.dimensions = hidl_vec<uint32_t>();
} else {
- uint32_t count = newType->dimensions.count;
+ uint32_t count = newType->dimensionCount;
if (static_cast<OperandType>(newType->type) != operand.type ||
count != operand.dimensions.size()) {
LOG(ERROR) << "ANeuralNetworksExecution_setInput/Output incompatible types";
return ANEURALNETWORKS_BAD_DATA;
}
for (uint32_t i = 0; i < count; i++) {
- locationAndDimension.dimensions[i] = newType->dimensions.data[i];
+ locationAndDimension.dimensions[i] = newType->dimensions[i];
}
}
return ANEURALNETWORKS_NO_ERROR;
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
index edcb4cc..ff4abec 100644
--- a/runtime/ModelBuilder.cpp
+++ b/runtime/ModelBuilder.cpp
@@ -44,7 +44,7 @@
mOperands.resize(idx + 1);
auto& operand = mOperands[idx];
operand.type = static_cast<OperandType>(type.type);
- setFromIntList(&operand.dimensions, type.dimensions);
+ setFromIntList(&operand.dimensions, type.dimensionCount, type.dimensions);
operand.numberOfConsumers = 0;
operand.scale = type.scale;
operand.zeroPoint = type.offset;
@@ -70,9 +70,8 @@
uint32_t extraBytes = alignBytesNeeded(existingSize, length);
mOperandValues.resize(existingSize + extraBytes + length);
operand.lifetime = OperandLifeTime::CONSTANT_COPY;
- operand.location = {.poolIndex = 0,
- .offset = existingSize + extraBytes,
- .length = neededLength};
+ operand.location = {
+ .poolIndex = 0, .offset = existingSize + extraBytes, .length = neededLength};
memcpy(&mOperandValues[operand.location.offset], buffer, length);
return ANEURALNETWORKS_NO_ERROR;
}
@@ -93,15 +92,14 @@
}
// TODO validate does not exceed length of memory
operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
- operand.location = {.poolIndex = mMemories.add(memory),
- .offset = offset,
- .length = neededLength};
+ operand.location = {
+ .poolIndex = mMemories.add(memory), .offset = offset, .length = neededLength};
return ANEURALNETWORKS_NO_ERROR;
}
-int ModelBuilder::addOperation(ANeuralNetworksOperationType type,
- const ANeuralNetworksIntList* inputs,
- const ANeuralNetworksIntList* outputs) {
+int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
+ const uint32_t* inputs, uint32_t outputCount,
+ const uint32_t* outputs) {
if (mCompletedModel) {
LOG(ERROR) << "ANeuralNetworksModel_addOperation can't modify after model finished";
return ANEURALNETWORKS_BAD_DATA;
@@ -114,10 +112,10 @@
mOperations.resize(operationIndex + 1);
auto& entry = mOperations[operationIndex];
entry.opTuple = {static_cast<OperationType>(type),
- static_cast<OperandType>(mOperands[inputs->data[0]].type)};
+ static_cast<OperandType>(mOperands[inputs[0]].type)};
- setFromIntList(&entry.inputs, *inputs);
- setFromIntList(&entry.outputs, *outputs);
+ setFromIntList(&entry.inputs, inputCount, inputs);
+ setFromIntList(&entry.outputs, outputCount, outputs);
for (uint32_t i : entry.inputs) {
mOperands[i].numberOfConsumers++;
// TODO mOperands[i].consumers.push_back(operationIndex);
@@ -125,22 +123,20 @@
return ANEURALNETWORKS_NO_ERROR;
}
-int ModelBuilder::setInputsAndOutputs(const ANeuralNetworksIntList* inputs,
- const ANeuralNetworksIntList* outputs) {
+int ModelBuilder::setInputsAndOutputs(uint32_t inputCount, const uint32_t* inputs,
+ uint32_t outputCount, const uint32_t* outputs) {
if (mCompletedModel) {
- LOG(ERROR)
- << "ANeuralNetworksModel_setInputsAndOutputs can't modify after model finished";
+ LOG(ERROR) << "ANeuralNetworksModel_setInputsAndOutputs can't modify after model finished";
return ANEURALNETWORKS_BAD_DATA;
}
// Makes a copy of the index list, validates the arguments, and changes
// the lifetime info of the corresponding operand.
- auto setArguments = [&](std::vector<uint32_t>* indexVector,
- const ANeuralNetworksIntList& indexList,
- OperandLifeTime lifetime) -> bool {
- indexVector->resize(indexList.count);
- for (uint32_t i = 0; i < indexList.count; i++) {
- const uint32_t operandIndex = indexList.data[i];
+ auto setArguments = [&](std::vector<uint32_t>* indexVector, uint32_t indexCount,
+ const uint32_t* indexList, OperandLifeTime lifetime) -> bool {
+ indexVector->resize(indexCount);
+ for (uint32_t i = 0; i < indexCount; i++) {
+ const uint32_t operandIndex = indexList[i];
if (operandIndex >= mOperands.size()) {
LOG(ERROR) << "ANeuralNetworksModel_setInputsAndOutputs Can't set input or output "
"to be "
@@ -162,8 +158,8 @@
return true;
};
- if (!setArguments(&mInputIndexes, *inputs, OperandLifeTime::MODEL_INPUT) ||
- !setArguments(&mOutputIndexes, *outputs, OperandLifeTime::MODEL_OUTPUT)) {
+ if (!setArguments(&mInputIndexes, inputCount, inputs, OperandLifeTime::MODEL_INPUT) ||
+ !setArguments(&mOutputIndexes, outputCount, outputs, OperandLifeTime::MODEL_OUTPUT)) {
return ANEURALNETWORKS_BAD_DATA;
}
@@ -210,7 +206,7 @@
lifetime == OperandLifeTime::MODEL_OUTPUT) {
count++;
operandToOperations.insert(
- std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
+ std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
}
}
if (count == 0) {
@@ -254,5 +250,5 @@
}
}
-} // namespace nn
-} // namespace android
+} // namespace nn
+} // namespace android
diff --git a/runtime/ModelBuilder.h b/runtime/ModelBuilder.h
index 6b93e88..b3eff9f 100644
--- a/runtime/ModelBuilder.h
+++ b/runtime/ModelBuilder.h
@@ -40,10 +40,10 @@
int setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
size_t length);
- int addOperation(ANeuralNetworksOperationType type, const ANeuralNetworksIntList* inputs,
- const ANeuralNetworksIntList* outputs);
- int setInputsAndOutputs(const ANeuralNetworksIntList* inputs,
- const ANeuralNetworksIntList* outputs);
+ int addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs,
+ uint32_t outputCount, const uint32_t* outputs);
+ int setInputsAndOutputs(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
+ const uint32_t* outputs);
int finish();
bool isFinished() const { return mCompletedModel; }
@@ -101,7 +101,7 @@
mutable bool mCompletedModel = false;
};
-} // namespace nn
-} // namespace android
+} // namespace nn
+} // namespace android
-#endif // ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
+#endif // ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index d27d939..b0f1e3f 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -20,13 +20,14 @@
#define LOG_TAG "NeuralNetworks"
+#include "NeuralNetworks.h"
+
#include "CompilationBuilder.h"
#include "Event.h"
#include "ExecutionBuilder.h"
-#include "NeuralNetworks.h"
-#include "NeuralNetworksOEM.h"
#include "Manager.h"
#include "Memory.h"
+#include "NeuralNetworksOEM.h"
#include "ModelBuilder.h"
#include <memory>
@@ -133,7 +134,8 @@
static_assert(static_cast<int32_t>(OperationType::ADD) == ANEURALNETWORKS_ADD,
"OperationType::ADD != ANEURALNETWORKS_ADD");
-static_assert(static_cast<int32_t>(OperationType::AVERAGE_POOL_2D) == ANEURALNETWORKS_AVERAGE_POOL_2D,
+static_assert(static_cast<int32_t>(OperationType::AVERAGE_POOL_2D) ==
+ ANEURALNETWORKS_AVERAGE_POOL_2D,
"OperationType::AVERAGE_POOL_2D != ANEURALNETWORKS_AVERAGE_POOL_2D");
static_assert(static_cast<int32_t>(OperationType::CONV_2D) == ANEURALNETWORKS_CONV_2D,
"OperationType::CONV_2D != ANEURALNETWORKS_CONV_2D");
@@ -215,10 +217,10 @@
static int ValidateOperandType(const ANeuralNetworksOperandType& type, const char* tag,
bool allowPartial) {
if (!allowPartial) {
- for (uint32_t i = 0; i < type.dimensions.count; i++) {
- if (type.dimensions.data[i] == 0) {
+ for (uint32_t i = 0; i < type.dimensionCount; i++) {
+ if (type.dimensions[i] == 0) {
LOG(ERROR) << tag << " OperandType invalid dimensions[" << i
- << "] = " << type.dimensions.data[i];
+ << "] = " << type.dimensions[i];
return ANEURALNETWORKS_BAD_DATA;
}
}
@@ -241,12 +243,12 @@
return ANEURALNETWORKS_NO_ERROR;
}
-static int ValidateOperandList(const ANeuralNetworksIntList& list, uint32_t count,
+static int ValidateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
const char* tag) {
- for (uint32_t i = 0; i < list.count; i++) {
- if (list.data[i] >= count) {
- LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list.data[i]
- << ", count " << count;
+ for (uint32_t i = 0; i < count; i++) {
+ if (list[i] >= operandCount) {
+ LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
+ << ", operandCount " << operandCount;
return ANEURALNETWORKS_BAD_DATA;
}
}
@@ -353,9 +355,9 @@
}
int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
- ANeuralNetworksOperationType type,
- ANeuralNetworksIntList* inputs,
- ANeuralNetworksIntList* outputs) {
+ ANeuralNetworksOperationType type, uint32_t inputCount,
+ const uint32_t* inputs, uint32_t outputCount,
+ const uint32_t* outputs) {
if (!model || !inputs || !outputs) {
LOG(ERROR) << "ANeuralNetworksModel_addOperation passed a nullptr";
return ANEURALNETWORKS_UNEXPECTED_NULL;
@@ -365,40 +367,40 @@
LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operations type " << type;
return ANEURALNETWORKS_BAD_DATA;
}
- int n = ValidateOperandList(*inputs, m->operandCount(),
+ int n = ValidateOperandList(inputCount, inputs, m->operandCount(),
"ANeuralNetworksModel_addOperation inputs");
if (n != ANEURALNETWORKS_NO_ERROR) {
return n;
}
- n = ValidateOperandList(*outputs, m->operandCount(),
+ n = ValidateOperandList(outputCount, outputs, m->operandCount(),
"ANeuralNetworksModel_addOperation outputs");
if (n != ANEURALNETWORKS_NO_ERROR) {
return n;
}
- return m->addOperation(type, inputs, outputs);
+ return m->addOperation(type, inputCount, inputs, outputCount, outputs);
}
-int ANeuralNetworksModel_setInputsAndOutputs(ANeuralNetworksModel* model,
- ANeuralNetworksIntList* inputs,
- ANeuralNetworksIntList* outputs) {
+int ANeuralNetworksModel_setInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
+ const uint32_t* inputs, uint32_t outputCount,
+ const uint32_t* outputs) {
if (!model || !inputs || !outputs) {
LOG(ERROR) << ("ANeuralNetworksModel_setInputsAndOutputs passed a nullptr");
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
- int n = ValidateOperandList(*inputs, m->operandCount(),
+ int n = ValidateOperandList(inputCount, inputs, m->operandCount(),
"ANeuralNetworksModel_setInputsAndOutputs inputs");
if (n != ANEURALNETWORKS_NO_ERROR) {
return n;
}
- n = ValidateOperandList(*outputs, m->operandCount(),
+ n = ValidateOperandList(outputCount, outputs, m->operandCount(),
"ANeuralNetworksModel_setInputsAndOutputs outputs");
if (n != ANEURALNETWORKS_NO_ERROR) {
return n;
}
- return m->setInputsAndOutputs(inputs, outputs);
+ return m->setInputsAndOutputs(inputCount, inputs, outputCount, outputs);
}
int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model,
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 66882c8..58b7915 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -377,8 +377,8 @@
* a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
* [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
* and “input_size” is the size of the input.
- * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where “num_units”
- * corresponds to the number of output nodes.
+ * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
+ * "num_units" corresponds to the number of output nodes.
* * 2: A 1-D tensor, of shape [num_units], specifying the bias.
* For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the bias should
* also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
@@ -1064,8 +1064,7 @@
*
* Memory objects can also be used to specify the input and output arguments of
* an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
- * and {@link ANeuralNetworksExecution_setOutputFromMemory}. This is a typical
- * usage for hardware buffers. See {@link ANeuralNetworksMemory_createFromHardwareBuffer}.
+ * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
*/
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
@@ -1085,6 +1084,10 @@
* <p>It is the application's responsibility to make sure that only one thread
* modifies a model at a given time. It is however safe for more than one
* thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
+ *
+ * <p>It is also the application's responsibility to ensure that there are no other
+ * uses of the model after calling {@link ANeuralNetworksModel_free}.
+ * This includes any compilation or execution object created using the model.</p>
*/
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
@@ -1110,6 +1113,10 @@
* thread to use {@link ANeuralNetworksCompilation_wait} at the same time.
* It is also safe for multiple threads to use a compilation object once
* {@link ANeuralNetworksCompilation_wait} has completed.</p>
+ *
+ * <p>It is also the application's responsibility to ensure that there are no other
+ * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
+ * This includes any execution object created using the compilation.</p>
*/
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
@@ -1142,14 +1149,12 @@
* <p>It is the application's responsibility to make sure that only one thread
* modifies an execution at a given time. It is however safe for more than one
* thread to use {@link ANeuralNetworksExecution_wait} at the same time.</p>
+ *
+ * <p>It is also the application's responsibility to ensure that there are no other
+ * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
*/
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
-typedef struct ANeuralNetworksIntList {
- uint32_t count;
- const uint32_t* data;
-} ANeuralNetworksIntList;
-
/**
* ANeuralNetworksOperandType describes the type of an operand.
* This structure is used to describe both scalars and tensors.
@@ -1157,8 +1162,10 @@
typedef struct ANeuralNetworksOperandType {
/** The data type, e.g ANEURALNETWORKS_INT8. */
int32_t type;
- /** Count and size of each dimension. The count should be 0 for scalars. */
- ANeuralNetworksIntList dimensions;
+ /** The number of dimensions. It should be 0 for scalars. */
+ uint32_t dimensionCount;
+ /** The dimensions of the tensor. It should be nullptr for scalars. */
+ const uint32_t* dimensions;
/** These two fields are only used for quantized tensors.
* They should be zero for scalars and non-fixed point tensors.
* The dequantized value of each entry is (value - offset) * scale.
@@ -1264,8 +1271,6 @@
* <p>{@link ANeuralNetworksModel_free} should be called once the model
* is no longer needed.</p>
*
- * This function is thread safe.
- *
* @param model The {@link ANeuralNetworksModel} to be created.
* Set to NULL if unsuccessful.
*
@@ -1391,8 +1396,10 @@
*
* @param model The model to be modified.
* @param type The type of the operation.
- * @param inputs An array of indexes identifying each an operand.
- * @param outputs An array of indexes identifying each an operand.
+ * @param inputCount The number of entries in the inputs array.
+ * @param inputs An array of indexes identifying each operand.
+ * @param outputCount The number of entries in the outputs array.
+ * @param outputs An array of indexes identifying each operand.
*
* The operands specified by inputs and outputs must have been
* previously added by calls to {@link ANeuralNetworksModel_addOperand}.
@@ -1405,9 +1412,9 @@
* @return ANEURALNETWORKS_NO_ERROR if successful.
*/
int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
- ANeuralNetworksOperationType type,
- ANeuralNetworksIntList* inputs,
- ANeuralNetworksIntList* outputs);
+ ANeuralNetworksOperationType type, uint32_t inputCount,
+ const uint32_t* inputs, uint32_t outputCount,
+ const uint32_t* outputs);
/**
 * Specifies which operands will be the model's inputs and outputs.
@@ -1416,7 +1423,9 @@
* return an error.
*
* @param model The model to be modified.
+ * @param inputCount The number of entries in the inputs array.
* @param inputs An array of indexes identifying the input operands.
+ * @param outputCount The number of entries in the outputs array.
* @param outputs An array of indexes identifying the output operands.
*
* The operands specified by inputs and outputs must have been
@@ -1428,9 +1437,9 @@
* See {@link ANeuralNetworksModel} for information on multithreaded usage.
*
*/
-int ANeuralNetworksModel_setInputsAndOutputs(ANeuralNetworksModel* model,
- ANeuralNetworksIntList* inputs,
- ANeuralNetworksIntList* outputs);
+int ANeuralNetworksModel_setInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
+ const uint32_t* inputs, uint32_t outputCount,
+ const uint32_t* outputs);
/**
* Create a {@link ANeuralNetworksCompilation} to compile the given model.
@@ -1569,7 +1578,10 @@
* See {@link ANeuralNetworksExecution} for information on multithreaded usage.
*
* @param execution The execution to be modified.
- * @param index The index of the model operand we're associating the input to.
+ * @param index The index of the input argument we are setting. It is
+ * an index into the lists passed to
+ * {@link ANeuralNetworksModel_setInputsAndOutputs}. It is not
+ * the index associated with {@link ANeuralNetworksModel_addOperand}.
* @param type The type of the operand. This should be used to specify the
* dimensions that were set to 0 when the operand was added to the
* model. All other properties of the type must be the same as
@@ -1594,7 +1606,10 @@
* See {@link ANeuralNetworksExecution} for information on multithreaded usage.
*
* @param execution The execution to be modified.
- * @param index The index of the model operand we're associating the input to.
+ * @param index The index of the input argument we are setting. It is
+ * an index into the lists passed to
+ * {@link ANeuralNetworksModel_setInputsAndOutputs}. It is not
+ * the index associated with {@link ANeuralNetworksModel_addOperand}.
* @param type The type of the operand. This can be used to specify the
* dimensions that were set to 0 when the operand was added to the
* model. All other values must be the same as specified in the
@@ -1622,7 +1637,10 @@
* See {@link ANeuralNetworksExecution} for information on multithreaded usage.
*
* @param execution The execution to be modified.
- * @param index The index of the model operand we're associating the output to.
+ * @param index The index of the output argument we are setting. It is
+ * an index into the lists passed to
+ * {@link ANeuralNetworksModel_setInputsAndOutputs}. It is not
+ * the index associated with {@link ANeuralNetworksModel_addOperand}.
* @param type The type of the operand. This can be used to specify the
* dimensions that were set to 0 when the operand was added to the
* model. All other values must be the same as specified in the
@@ -1647,7 +1665,10 @@
* See {@link ANeuralNetworksExecution} for information on multithreaded usage.
*
* @param execution The execution to be modified.
- * @param index The index of the model operand we're associating the input to.
+ * @param index The index of the output argument we are setting. It is
+ * an index into the lists passed to
+ * {@link ANeuralNetworksModel_setInputsAndOutputs}. It is not
+ * the index associated with {@link ANeuralNetworksModel_addOperand}.
* @param type The type of the operand. This can be used to specify the
* dimensions that were set to 0 when the operand was added to the
* model. All other values must be the same as specified in the
diff --git a/runtime/include/NeuralNetworksWrapper.h b/runtime/include/NeuralNetworksWrapper.h
index 6e7f892..d91f80d 100644
--- a/runtime/include/NeuralNetworksWrapper.h
+++ b/runtime/include/NeuralNetworksWrapper.h
@@ -61,8 +61,8 @@
operandType.scale = 0.0f;
operandType.offset = 0;
- operandType.dimensions.count = static_cast<uint32_t>(dimensions.size());
- operandType.dimensions.data = dimensions.data();
+ operandType.dimensionCount = static_cast<uint32_t>(dimensions.size());
+ operandType.dimensions = dimensions.data();
}
OperandType(Type type, float scale, const std::vector<uint32_t>& d) : OperandType(type, d) {
@@ -93,10 +93,9 @@
class Memory {
public:
-
Memory(size_t size, int protect, int fd, size_t offset) {
mValid = ANeuralNetworksMemory_createFromFd(size, protect, fd, offset, &mMemory) ==
- ANEURALNETWORKS_NO_ERROR;
+ ANEURALNETWORKS_NO_ERROR;
}
~Memory() { ANeuralNetworksMemory_free(mMemory); }
@@ -110,9 +109,7 @@
// Move semantics to remove access to the runtime object from the wrapper
// object that is being moved. This ensures the runtime object will be
// freed only once.
- Memory(Memory&& other) {
- *this = std::move(other);
- }
+ Memory(Memory&& other) { *this = std::move(other); }
Memory& operator=(Memory&& other) {
if (this != &other) {
mMemory = other.mMemory;
@@ -148,9 +145,7 @@
// Move semantics to remove access to the runtime object from the wrapper
// object that is being moved. This ensures the runtime object will be
// freed only once.
- Model(Model&& other) {
- *this = std::move(other);
- }
+ Model(Model&& other) { *this = std::move(other); }
Model& operator=(Model&& other) {
if (this != &other) {
mModel = other.mModel;
@@ -190,21 +185,18 @@
void addOperation(ANeuralNetworksOperationType type, const std::vector<uint32_t>& inputs,
const std::vector<uint32_t>& outputs) {
- ANeuralNetworksIntList in, out;
- Set(&in, inputs);
- Set(&out, outputs);
- if (ANeuralNetworksModel_addOperation(mModel, type, &in, &out) !=
- ANEURALNETWORKS_NO_ERROR) {
+ if (ANeuralNetworksModel_addOperation(mModel, type, static_cast<uint32_t>(inputs.size()),
+ inputs.data(), static_cast<uint32_t>(outputs.size()),
+ outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
mValid = false;
}
}
void setInputsAndOutputs(const std::vector<uint32_t>& inputs,
const std::vector<uint32_t>& outputs) {
- ANeuralNetworksIntList in, out;
- Set(&in, inputs);
- Set(&out, outputs);
- if (ANeuralNetworksModel_setInputsAndOutputs(mModel, &in, &out) !=
- ANEURALNETWORKS_NO_ERROR) {
+ if (ANeuralNetworksModel_setInputsAndOutputs(mModel, static_cast<uint32_t>(inputs.size()),
+ inputs.data(),
+ static_cast<uint32_t>(outputs.size()),
+ outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
mValid = false;
}
}
@@ -212,14 +204,6 @@
bool isValid() const { return mValid; }
private:
- /**
- * WARNING list won't be valid once vec is destroyed or modified.
- */
- void Set(ANeuralNetworksIntList* list, const std::vector<uint32_t>& vec) {
- list->count = static_cast<uint32_t>(vec.size());
- list->data = vec.data();
- }
-
ANeuralNetworksModel* mModel = nullptr;
// We keep track of the operand ID as a convenience to the caller.
uint32_t mNextOperandId = 0;
@@ -238,11 +222,9 @@
~Compilation() { ANeuralNetworksCompilation_free(mCompilation); }
Compilation(const Compilation&) = delete;
- Compilation& operator=(const Compilation &) = delete;
+ Compilation& operator=(const Compilation&) = delete;
- Compilation(Compilation&& other) {
- *this = std::move(other);
- }
+ Compilation(Compilation&& other) { *this = std::move(other); }
Compilation& operator=(Compilation&& other) {
if (this != &other) {
mCompilation = other.mCompilation;
@@ -294,9 +276,7 @@
// Move semantics to remove access to the runtime object from the wrapper
// object that is being moved. This ensures the runtime object will be
// freed only once.
- Execution(Execution&& other) {
- *this = std::move(other);
- }
+ Execution(Execution&& other) { *this = std::move(other); }
Execution& operator=(Execution&& other) {
if (this != &other) {
mExecution = other.mExecution;
@@ -334,9 +314,7 @@
return result;
}
- Result wait() {
- return static_cast<Result>(ANeuralNetworksExecution_wait(mExecution));
- }
+ Result wait() { return static_cast<Result>(ANeuralNetworksExecution_wait(mExecution)); }
Result compute() {
Result result = static_cast<Result>(ANeuralNetworksExecution_startCompute(mExecution));
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index 13aa238..af69692 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -48,19 +48,20 @@
uint32_t dimensions[]{1};
ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32,
- .dimensions = {.count = 1, .data = dimensions}};
+ .dimensionCount = 1,
+ .dimensions = dimensions};
ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
uint32_t inList[2]{0, 1};
uint32_t outList[1]{2};
- ANeuralNetworksIntList inputs{.count = 2, .data = inList};
- ANeuralNetworksIntList outputs{.count = 1, .data = outList};
- ASSERT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_ADD, &inputs, &outputs),
+ ASSERT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_ADD, 2, inList, 1,
+ outList),
ANEURALNETWORKS_NO_ERROR);
ASSERT_EQ(ANeuralNetworksModel_finish(mModel), ANEURALNETWORKS_NO_ERROR);
- ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &mCompilation), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
}
virtual void TearDown() {
ANeuralNetworksCompilation_free(mCompilation);
@@ -92,8 +93,8 @@
}
TEST_F(ValidationTestModel, AddOperand) {
- ANeuralNetworksOperandType floatType{.type = ANEURALNETWORKS_FLOAT32,
- .dimensions = {.count = 0, .data = nullptr}};
+ ANeuralNetworksOperandType floatType{
+ .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
EXPECT_EQ(ANeuralNetworksModel_addOperand(nullptr, &floatType),
ANEURALNETWORKS_UNEXPECTED_NULL);
EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
@@ -101,8 +102,8 @@
}
TEST_F(ValidationTestModel, SetOperandValue) {
- ANeuralNetworksOperandType floatType{.type = ANEURALNETWORKS_FLOAT32,
- .dimensions = {.count = 0, .data = nullptr}};
+ ANeuralNetworksOperandType floatType{
+ .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);
char buffer[20];
@@ -124,16 +125,16 @@
}
TEST_F(ValidationTestModel, AddOperation) {
- ANeuralNetworksIntList inputs{};
- ANeuralNetworksIntList outputs{};
- EXPECT_EQ(ANeuralNetworksModel_addOperation(nullptr, ANEURALNETWORKS_AVERAGE_POOL_2D, &inputs,
- &outputs),
+ uint32_t input = 0;
+ uint32_t output = 0;
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(nullptr, ANEURALNETWORKS_AVERAGE_POOL_2D, 1, &input,
+ 1, &output),
ANEURALNETWORKS_UNEXPECTED_NULL);
- EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, nullptr,
- &outputs),
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, 0, nullptr,
+ 1, &output),
ANEURALNETWORKS_UNEXPECTED_NULL);
- EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, &inputs,
- nullptr),
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, 1, &input,
+ 0, nullptr),
ANEURALNETWORKS_UNEXPECTED_NULL);
// EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel,
// ANEURALNETWORKS_AVERAGE_POOL_2D, &inputs,
@@ -142,13 +143,13 @@
}
TEST_F(ValidationTestModel, SetInputsAndOutputs) {
- ANeuralNetworksIntList inputs;
- ANeuralNetworksIntList outputs;
- EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(nullptr, &inputs, &outputs),
+ uint32_t input = 0;
+ uint32_t output = 0;
+ EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(nullptr, 1, &input, 1, &output),
ANEURALNETWORKS_UNEXPECTED_NULL);
- EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(mModel, nullptr, &outputs),
+ EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(mModel, 0, nullptr, 1, &output),
ANEURALNETWORKS_UNEXPECTED_NULL);
- EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(mModel, &inputs, nullptr),
+ EXPECT_EQ(ANeuralNetworksModel_setInputsAndOutputs(mModel, 1, &input, 0, nullptr),
ANEURALNETWORKS_UNEXPECTED_NULL);
}
@@ -160,7 +161,8 @@
TEST_F(ValidationTestModel, CreateCompilation) {
ANeuralNetworksCompilation* compilation = nullptr;
- EXPECT_EQ(ANeuralNetworksCompilation_create(nullptr, &compilation), ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksCompilation_create(nullptr, &compilation),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, &compilation), ANEURALNETWORKS_BAD_STATE);
@@ -173,8 +175,7 @@
EXPECT_EQ(ANeuralNetworksCompilation_setPreference(nullptr, ANEURALNETWORKS_PREFER_LOW_POWER),
ANEURALNETWORKS_UNEXPECTED_NULL);
- EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation, 40),
- ANEURALNETWORKS_BAD_DATA);
+ EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation, 40), ANEURALNETWORKS_BAD_DATA);
}
TEST_F(ValidationTestCompilation, CreateExecution) {
@@ -240,4 +241,4 @@
}
#endif
-} // namespace
+} // namespace