Adds float16 support for LSH_PROJECTION.
Bug: 118607785
Test: NeuralNetworksTest_static
Change-Id: Ibfb752efae48cc63a3a3b11e8ef664e2b4dcd988
Merged-In: Ibfb752efae48cc63a3a3b11e8ef664e2b4dcd988
(cherry picked from commit 62fc7896f1ed790260bef1849cd950482fb8c315)
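In client terms, the new capability looks like this: a minimal sketch of building the same DENSE LSH_PROJECTION as the generated test model below, but through the public C API, assuming API level 29 (NNAPI 1.2) headers for ANEURALNETWORKS_TENSOR_FLOAT16. Return-code checks are elided.

```cpp
#include <android/NeuralNetworks.h>

// Sketch only: DENSE LSH_PROJECTION with a TENSOR_FLOAT16 hash and weight,
// mirroring runtime/test/specs/V1_2/lsh_projection_float16.mod.py below.
void buildLshProjectionFloat16(ANeuralNetworksModel* model) {
    const uint32_t hashDims[] = {4, 2};    // num_hash x num_bits
    const uint32_t lookupDims[] = {3, 2};  // num_input x num_bits
    const uint32_t weightDims[] = {3};     // one weight per input row
    const uint32_t outputDims[] = {8};     // num_hash * num_bits for DENSE

    const ANeuralNetworksOperandType hash = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT16, .dimensionCount = 2, .dimensions = hashDims};
    const ANeuralNetworksOperandType lookup = {
            .type = ANEURALNETWORKS_TENSOR_INT32, .dimensionCount = 2, .dimensions = lookupDims};
    const ANeuralNetworksOperandType weight = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT16, .dimensionCount = 1, .dimensions = weightDims};
    const ANeuralNetworksOperandType typeParam = {.type = ANEURALNETWORKS_INT32};
    const ANeuralNetworksOperandType output = {
            .type = ANEURALNETWORKS_TENSOR_INT32, .dimensionCount = 1, .dimensions = outputDims};

    ANeuralNetworksModel_addOperand(model, &hash);       // operand 0
    ANeuralNetworksModel_addOperand(model, &lookup);     // operand 1
    ANeuralNetworksModel_addOperand(model, &weight);     // operand 2
    ANeuralNetworksModel_addOperand(model, &typeParam);  // operand 3
    ANeuralNetworksModel_addOperand(model, &output);     // operand 4

    static const _Float16 hashInit[] = {0.123f, 0.456f, -0.321f, -0.654f,
                                        1.234f, 5.678f, -4.321f, -8.765f};
    ANeuralNetworksModel_setOperandValue(model, 0, hashInit, sizeof(hashInit));
    static const int32_t kDense = 2;  // LSHProjectionType_DENSE
    ANeuralNetworksModel_setOperandValue(model, 3, &kDense, sizeof(kDense));

    const uint32_t inputs[] = {0, 1, 2, 3};
    const uint32_t outputs[] = {4};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_LSH_PROJECTION, 4, inputs, 1,
                                      outputs);

    const uint32_t modelInputs[] = {1, 2};  // lookup and weight are runtime inputs
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs, 1, outputs);
}
```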
diff --git a/common/Android.bp b/common/Android.bp
index f812beb..ef06e44 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -85,8 +85,9 @@
vendor_available: true,
// b/109953668, disable OpenMP
// openmp: true,
- export_include_dirs: ["include"],
-
+ export_include_dirs: [
+ "include",
+ ],
srcs: [
"CpuExecutor.cpp",
"GraphDump.cpp",
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 814402e..6aa6638 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -1681,16 +1681,29 @@
lookup.Eval();
} break;
case OperationType::LSH_PROJECTION: {
- RunTimeOperandInfo &output =
- mOperands[outs[LSHProjection::kOutputTensor]];
-
+ RunTimeOperandInfo& output = mOperands[outs[LSHProjection::kOutputTensor]];
Shape outputShape;
- LSHProjection lsh(operation, mOperands);
+ if (!LSHProjection::Prepare(operation, mOperands, &outputShape) ||
+ !setInfoAndAllocateIfNeeded(&output, outputShape)) {
+ break;
+ }
- success = LSHProjection::Prepare(operation, mOperands,
- &outputShape) &&
- setInfoAndAllocateIfNeeded(&output, outputShape) &&
- lsh.Eval();
+ LSHProjection lsh(operation, mOperands);
+ const RunTimeOperandInfo& hash = mOperands[ins[LSHProjection::kHashTensor]];
+ switch (hash.type) {
+ case OperandType::TENSOR_FLOAT32: {
+ success = lsh.Eval<float>();
+ break;
+ }
+ case OperandType::TENSOR_FLOAT16: {
+ success = lsh.Eval<_Float16>();
+ break;
+ }
+ default: {
+ success = false;
+ LOG(ERROR) << "Unsupported data type";
+ }
+ }
} break;
case OperationType::LSTM: {
RunTimeOperandInfo& scratch = mOperands[outs[LSTMCell::kScratchBufferTensor]];
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 9946180..c3e4fc1 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -1531,24 +1531,40 @@
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[1]].type;
- if (inputType != OperandType::TENSOR_FLOAT32 &&
+ if (inputType != OperandType::TENSOR_FLOAT16 &&
+ inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM) {
LOG(ERROR) << "Unsupported input tensor type for operation "
<< getOperationName(opType);
return ANEURALNETWORKS_BAD_DATA;
}
- std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_FLOAT32,
- inputType,
- OperandType::TENSOR_FLOAT32,
- OperandType::INT32};
+ auto hashType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ if (hashType == OperandType::TENSOR_FLOAT16) {
+ NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+ inExpectedTypes = {
+ OperandType::TENSOR_FLOAT16,
+ inputType,
+ OperandType::TENSOR_FLOAT16,
+ OperandType::INT32,
+ };
+ } else if (hashType == OperandType::TENSOR_FLOAT32) {
+ NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+ inExpectedTypes = {
+ OperandType::TENSOR_FLOAT32,
+ inputType,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ };
+ } else {
+ LOG(ERROR) << "Unsupported hash tensor type for operation "
+ << getOperationName(opType);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
- // TODO(mks): Return V1_2 if inputType is sparse.
- NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
- return validateOperationOperandTypes(operands,
- inputCount, inputIndexes,
- inExpectedTypes,
- outputCount, outputIndexes,
+ return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+ inExpectedTypes, outputCount, outputIndexes,
outExpectedTypes);
}
case ANEURALNETWORKS_LSTM: {
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index fc5bc26..e66aade 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -82,14 +82,14 @@
// to match the trained model. This is going to be changed once the new
// model is trained in an optimized method.
//
-int running_sign_bit(const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight,
- float seed) {
+template <typename T>
+int runningSignBit(const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight, float seed) {
double score = 0.0;
int input_item_bytes = sizeOfData(input->type, input->dimensions) / SizeOfDimension(input, 0);
char* input_ptr = (char*)(input->buffer);
- const size_t seed_size = sizeof(float);
- const size_t key_bytes = sizeof(float) + input_item_bytes;
+ const size_t seed_size = sizeof(seed);
+ const size_t key_bytes = seed_size + input_item_bytes;
std::unique_ptr<char[]> key(new char[key_bytes]);
for (uint32_t i = 0; i < SizeOfDimension(input, 0); ++i) {
@@ -103,13 +103,14 @@
if (weight->lifetime == OperandLifeTime::NO_VALUE) {
score += running_value;
} else {
- score += reinterpret_cast<float*>(weight->buffer)[i] * running_value;
+ score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
}
}
return (score > 0) ? 1 : 0;
}
+template <typename T>
void SparseLshProjection(LSHProjectionType type, const RunTimeOperandInfo* hash,
const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight,
int32_t* out_buf) {
@@ -118,8 +119,8 @@
for (int i = 0; i < num_hash; i++) {
int32_t hash_signature = 0;
for (int j = 0; j < num_bits; j++) {
- float seed = reinterpret_cast<float*>(hash->buffer)[i * num_bits + j];
- int bit = running_sign_bit(input, weight, seed);
+ T seed = reinterpret_cast<T*>(hash->buffer)[i * num_bits + j];
+ int bit = runningSignBit<T>(input, weight, static_cast<float>(seed));
hash_signature = (hash_signature << 1) | bit;
}
if (type == LSHProjectionType_SPARSE_DEPRECATED) {
@@ -130,19 +131,21 @@
}
}
+template <typename T>
void DenseLshProjection(const RunTimeOperandInfo* hash, const RunTimeOperandInfo* input,
const RunTimeOperandInfo* weight, int32_t* out_buf) {
int num_hash = SizeOfDimension(hash, 0);
int num_bits = SizeOfDimension(hash, 1);
for (int i = 0; i < num_hash; i++) {
for (int j = 0; j < num_bits; j++) {
- float seed = reinterpret_cast<float*>(hash->buffer)[i * num_bits + j];
- int bit = running_sign_bit(input, weight, seed);
+ T seed = reinterpret_cast<T*>(hash->buffer)[i * num_bits + j];
+ int bit = runningSignBit<T>(input, weight, static_cast<float>(seed));
*out_buf++ = bit;
}
}
}
+template <typename T>
bool LSHProjection::Eval() {
NNTRACE_COMP("LSHProjection::Eval");
@@ -150,11 +153,11 @@
switch (type_) {
case LSHProjectionType_DENSE:
- DenseLshProjection(hash_, input_, weight_, out_buf);
+ DenseLshProjection<T>(hash_, input_, weight_, out_buf);
break;
case LSHProjectionType_SPARSE:
case LSHProjectionType_SPARSE_DEPRECATED:
- SparseLshProjection(type_, hash_, input_, weight_, out_buf);
+ SparseLshProjection<T>(type_, hash_, input_, weight_, out_buf);
break;
default:
return false;
@@ -162,5 +165,27 @@
return true;
}
+template bool LSHProjection::Eval<float>();
+template bool LSHProjection::Eval<_Float16>();
+
+template int runningSignBit<float>(const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, float seed);
+template int runningSignBit<_Float16>(const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, float seed);
+
+template void SparseLshProjection<float>(LSHProjectionType type, const RunTimeOperandInfo* hash,
+ const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, int32_t* outBuffer);
+template void SparseLshProjection<_Float16>(LSHProjectionType type, const RunTimeOperandInfo* hash,
+ const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, int32_t* outBuffer);
+
+template void DenseLshProjection<float>(const RunTimeOperandInfo* hash,
+ const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, int32_t* outBuffer);
+template void DenseLshProjection<_Float16>(const RunTimeOperandInfo* hash,
+ const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, int32_t* outBuffer);
+
} // namespace nn
} // namespace android
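For readers without the full file: runningSignBit() builds a (seed, input-row) key per row, hashes it, weights the 64-bit result, and emits the sign of the accumulated score. A self-contained sketch with one stated substitution: std::hash over the key bytes stands in for the farmhash fingerprint the real code uses, so the concrete bits differ while the control flow matches the templated code above.

```cpp
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// Sketch of the dense path. Substitution: std::hash replaces the farmhash
// fingerprint, so outputs differ, but the seed/key/score structure is the same.
template <typename T>  // T = float or _Float16, matching the instantiations above
std::vector<int32_t> denseLshSketch(const std::vector<T>& seeds,           // num_hash * num_bits
                                    const std::vector<std::string>& rows,  // raw bytes per input row
                                    const std::vector<T>& weights) {       // one weight per row
    std::vector<int32_t> out;
    out.reserve(seeds.size());
    for (const T& rawSeed : seeds) {
        const float seed = static_cast<float>(rawSeed);  // seeds are widened, as in Eval<T>()
        double score = 0.0;
        for (size_t i = 0; i < rows.size(); ++i) {
            // Key layout mirrors runningSignBit(): seed bytes, then row bytes.
            std::string key(reinterpret_cast<const char*>(&seed), sizeof(seed));
            key += rows[i];
            const double runningValue =
                    static_cast<double>(static_cast<int64_t>(std::hash<std::string>{}(key)));
            score += static_cast<double>(weights[i]) * runningValue;
        }
        out.push_back(score > 0 ? 1 : 0);  // one sign bit per hash seed
    }
    return out;
}
```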
diff --git a/common/operations/LSHProjection.h b/common/operations/LSHProjection.h
index 8cf2fdc..7a25518 100644
--- a/common/operations/LSHProjection.h
+++ b/common/operations/LSHProjection.h
@@ -40,6 +40,7 @@
static bool Prepare(const Operation& operation, std::vector<RunTimeOperandInfo>& operands,
Shape* outputShape);
+ template <typename T>
bool Eval();
static constexpr int kHashTensor = 0;
@@ -60,6 +61,18 @@
RunTimeOperandInfo* output_;
};
+template <typename T>
+int runningSignBit(const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight, float seed);
+
+template <typename T>
+void SparseLshProjection(LSHProjectionType type, const RunTimeOperandInfo* hash,
+ const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight,
+ int32_t* outBuffer);
+
+template <typename T>
+void DenseLshProjection(const RunTimeOperandInfo* hash, const RunTimeOperandInfo* input,
+ const RunTimeOperandInfo* weight, int32_t* outBuffer);
+
} // namespace nn
} // namespace android
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 186484e..900d252 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -914,6 +914,7 @@
 * Projects an input to a bit vector via locality sensitive hashing.
*
* Supported input tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_INT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
@@ -935,7 +936,7 @@
* Tensor[1].Dim[0] == Tensor[2].Dim[0]
* * 3: Type:
* Sparse:
- * Value LSHProjectionType_SPARSE(=3).
+ * Value LSHProjectionType_SPARSE(=3) (since API level 29).
* Computed bit vector is considered to be sparse.
* Each output element is an int32 made up of multiple bits
* computed from hash functions.
@@ -963,7 +964,7 @@
* A flattened tensor that represents projected bit vectors.
*
* Available since API level 27.
- * The offset value for sparse projections was added in API level 28.
+ * The offset value for sparse projections was added in API level 29.
*/
ANEURALNETWORKS_LSH_PROJECTION = 15,
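To make the corrected doc concrete, a worked reading of the sparse offset; the formula is inferred from the description above and the SPARSE_DEPRECATED branch in LSHProjection.cpp, not quoted from the header.

```cpp
// Worked example for num_hash = 4, num_bits = 2, so each hash signature s_i
// is a 2-bit value in [0, 3]:
//   SPARSE (=3):            output[i] = i * (1 << num_bits) + s_i
//                           -> disjoint buckets {0..3}, {4..7}, {8..11}, {12..15}
//   SPARSE_DEPRECATED (=1): output[i] = s_i  (no per-hash offset; buckets collide)
//   DENSE (=2):             num_hash * num_bits = 8 individual 0/1 sign bits
```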
diff --git a/runtime/test/TestValidateOperations.cpp b/runtime/test/TestValidateOperations.cpp
index a2e25b9..d14b63f 100644
--- a/runtime/test/TestValidateOperations.cpp
+++ b/runtime/test/TestValidateOperations.cpp
@@ -1141,9 +1141,9 @@
hashtableLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
}
-void lshProjectionTest(int32_t operandCode) {
+void lshProjectionTest(int32_t operandCode, int32_t hashAndWeightOperandCode) {
uint32_t inputDimensions[2] = {5, 5};
- ANeuralNetworksOperandType hash = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ ANeuralNetworksOperandType hash = {.type = hashAndWeightOperandCode,
.dimensionCount = 2,
.dimensions = inputDimensions,
.scale = 0.0f,
@@ -1156,7 +1156,7 @@
}
uint32_t weightDimensions[1] = {5};
- ANeuralNetworksOperandType weight = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ ANeuralNetworksOperandType weight = {.type = hashAndWeightOperandCode,
.dimensionCount = 1,
.dimensions = weightDimensions,
.scale = 0.0f,
@@ -1180,12 +1180,18 @@
EXPECT_TRUE(lshProjTest.testMutatingOutputOperandCounts());
}
+TEST(OperationValidationTest, LSH_PROJECTION_float16) {
+ lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT32);
+ lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
+}
TEST(OperationValidationTest, LSH_PROJECTION_float32) {
- lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+ lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
+ lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT16);
}
TEST(OperationValidationTest, LSH_PROJECTION_quant8) {
- lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+ lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
+ lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
}
TEST(OperationValidationTest, LSTM_float32) {
diff --git a/runtime/test/for-cts/TestGeneratedOneFile.cpp b/runtime/test/for-cts/TestGeneratedOneFile.cpp
index 7be807d..a87db19 100644
--- a/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -374,6 +374,7 @@
#include "../generated/tests/lsh_projection_3_relaxed.mod.py.cpp"
#include "../generated/tests/lsh_projection_4_relaxed.mod.py.cpp"
#include "../generated/tests/lsh_projection_deprecated.mod.py.cpp"
+#include "../generated/tests/lsh_projection_float16.mod.py.cpp"
#include "../generated/tests/max_pool_v1_2.mod.py.cpp"
#include "../generated/tests/maximum.mod.py.cpp"
#include "../generated/tests/mean_float16.mod.py.cpp"
diff --git a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index d5a95e8..6950596 100644
--- a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -15423,6 +15423,44 @@
}
+// Generated from: lsh_projection_float16.mod.py.
+namespace lsh_projection_float16 {
+// Generated lsh_projection_float16 test
+#include "examples/lsh_projection_float16.example.cpp"
+// Generated model constructor
+#include "vts_models/lsh_projection_float16.model.cpp"
+} // namespace lsh_projection_float16
+
+TEST_F(NeuralnetworksHidlTest, lsh_projection_float16) {
+ generated_tests::Execute(device,
+ lsh_projection_float16::createTestModel,
+ lsh_projection_float16::is_ignored,
+ lsh_projection_float16::get_examples());
+}
+
+TEST_F(ValidationTest, lsh_projection_float16) {
+ const Model model = lsh_projection_float16::createTestModel();
+ const std::vector<Request> requests = createRequests(lsh_projection_float16::get_examples());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, lsh_projection_float16_float16) {
+ generated_tests::Execute(device,
+ lsh_projection_float16::createTestModel_float16,
+ lsh_projection_float16::is_ignored_float16,
+ lsh_projection_float16::get_examples_float16());
+}
+
+TEST_F(ValidationTest, lsh_projection_float16_float16) {
+ const Model model = lsh_projection_float16::createTestModel_float16();
+ const std::vector<Request> requests = createRequests(lsh_projection_float16::get_examples_float16());
+ validateModel(model);
+ validateRequests(model, requests);
+}
+
+
// Generated from: max_pool_v1_2.mod.py.
namespace max_pool_v1_2 {
// Generated max_pool_v1_2 test
diff --git a/runtime/test/generated/examples/lsh_projection_float16.example.cpp b/runtime/test/generated/examples/lsh_projection_float16.example.cpp
new file mode 100644
index 0000000..a0d7612
--- /dev/null
+++ b/runtime/test/generated/examples/lsh_projection_float16.example.cpp
@@ -0,0 +1,84 @@
+// clang-format off
+// Generated file (from: lsh_projection_float16.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {12345, 54321, 67890, 9876, -12345678, -87654321}}},
+ // int -> QUANT8_ASYMM map
+ {},
+ // int -> QUANT16_SYMM map
+ {},
+ // int -> FLOAT16 map
+ {{1, {0.12f, 0.34f, 0.56f}}},
+ // int -> BOOL8 map
+ {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {1, 1, 1, 1, 1, 0, 0, 0}}},
+ // int -> QUANT8_ASYMM map
+ {},
+ // int -> QUANT16_SYMM map
+ {},
+ // int -> FLOAT16 map
+ {},
+ // int -> BOOL8 map
+ {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_float16() {
+static std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {12345, 54321, 67890, 9876, -12345678, -87654321}}},
+ // int -> QUANT8_ASYMM map
+ {},
+ // int -> QUANT16_SYMM map
+ {},
+ // int -> FLOAT16 map
+ {{1, {0.12f, 0.34f, 0.56f}}},
+ // int -> BOOL8 map
+ {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {1, 1, 1, 1, 1, 0, 0, 0}}},
+ // int -> QUANT8_ASYMM map
+ {},
+ // int -> QUANT16_SYMM map
+ {},
+ // int -> FLOAT16 map
+ {},
+ // int -> BOOL8 map
+ {},
+}
+},
+}, // End of an example
+};
+return examples_float16;
+};
+
diff --git a/runtime/test/generated/models/lsh_projection_float16.model.cpp b/runtime/test/generated/models/lsh_projection_float16.model.cpp
new file mode 100644
index 0000000..ecf84bb
--- /dev/null
+++ b/runtime/test/generated/models/lsh_projection_float16.model.cpp
@@ -0,0 +1,62 @@
+// clang-format off
+// Generated file (from: lsh_projection_float16.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT16, {4, 2});
+ OperandType type1(Type::TENSOR_INT32, {3, 2});
+ OperandType type2(Type::TENSOR_FLOAT16, {3});
+ OperandType type3(Type::INT32, {});
+ OperandType type4(Type::TENSOR_INT32, {8});
+ // Phase 1, operands
+ auto hash = model->addOperand(&type0);
+ auto lookup = model->addOperand(&type1);
+ auto weight = model->addOperand(&type2);
+ auto type_param = model->addOperand(&type3);
+ auto output = model->addOperand(&type4);
+ // Phase 2, operations
+ static _Float16 hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
+ model->setOperandValue(hash, hash_init, sizeof(_Float16) * 8);
+ static int32_t type_param_init[] = {2};
+ model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {lookup, weight},
+ {output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT16, {4, 2});
+ OperandType type1(Type::TENSOR_INT32, {3, 2});
+ OperandType type2(Type::TENSOR_FLOAT16, {3});
+ OperandType type3(Type::INT32, {});
+ OperandType type4(Type::TENSOR_INT32, {8});
+ // Phase 1, operands
+ auto hash = model->addOperand(&type0);
+ auto lookup = model->addOperand(&type1);
+ auto weight = model->addOperand(&type2);
+ auto type_param = model->addOperand(&type3);
+ auto output = model->addOperand(&type4);
+ // Phase 2, operations
+ static _Float16 hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
+ model->setOperandValue(hash, hash_init, sizeof(_Float16) * 8);
+ static int32_t type_param_init[] = {2};
+ model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {lookup, weight},
+ {output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/tests/lsh_projection_float16.mod.py.cpp b/runtime/test/generated/tests/lsh_projection_float16.mod.py.cpp
new file mode 100644
index 0000000..a222899
--- /dev/null
+++ b/runtime/test/generated/tests/lsh_projection_float16.mod.py.cpp
@@ -0,0 +1,23 @@
+// clang-format off
+// Generated file (from: lsh_projection_float16.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace lsh_projection_float16 {
+// Generated lsh_projection_float16 test
+#include "generated/examples/lsh_projection_float16.example.cpp"
+// Generated model constructor
+#include "generated/models/lsh_projection_float16.model.cpp"
+} // namespace lsh_projection_float16
+
+TEST_F(GeneratedTests, lsh_projection_float16) {
+ execute(lsh_projection_float16::CreateModel,
+ lsh_projection_float16::is_ignored,
+ lsh_projection_float16::get_examples());
+}
+
+TEST_F(GeneratedTests, lsh_projection_float16_float16) {
+ execute(lsh_projection_float16::CreateModel_float16,
+ lsh_projection_float16::is_ignored_float16,
+ lsh_projection_float16::get_examples_float16());
+}
+
diff --git a/runtime/test/generated/vts_models/lsh_projection_float16.model.cpp b/runtime/test/generated/vts_models/lsh_projection_float16.model.cpp
new file mode 100644
index 0000000..c1ae806
--- /dev/null
+++ b/runtime/test/generated/vts_models/lsh_projection_float16.model.cpp
@@ -0,0 +1,162 @@
+// clang-format off
+// Generated file (from: lsh_projection_float16.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {3, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {8},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::LSH_PROJECTION,
+ .inputs = {0, 1, 2, 3},
+ .outputs = {4},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {1, 2};
+ const std::vector<uint32_t> outputIndexes = {4};
+ std::vector<uint8_t> operandValues = {
+ 223, 47, 76, 55, 35, 181, 59, 185, 240, 60, 174, 69, 82, 196, 98, 200, 2, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_float16() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {4, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 16},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {3, 2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT16,
+ .dimensions = {3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {8},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::LSH_PROJECTION,
+ .inputs = {0, 1, 2, 3},
+ .outputs = {4},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {1, 2};
+ const std::vector<uint32_t> outputIndexes = {4};
+ std::vector<uint8_t> operandValues = {
+ 223, 47, 76, 55, 35, 181, 59, 185, 240, 60, 174, 69, 82, 196, 98, 200, 2, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_float16(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/specs/V1_2/lsh_projection_float16.mod.py b/runtime/test/specs/V1_2/lsh_projection_float16.mod.py
new file mode 100644
index 0000000..ed19b17
--- /dev/null
+++ b/runtime/test/specs/V1_2/lsh_projection_float16.mod.py
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Parameter("hash", "TENSOR_FLOAT16", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT16", "{%d}" % (num_input))
+type_param = Int32Scalar("type_param", 2) # DENSE
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash * num_bits))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
+
+# TODO: weight should be a constant, too.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [0.12, 0.34, 0.56]
+}
+output0 = {output: [1, 1, 1, 1, 1, 0, 0, 0]}
+
+Example((input0, output0)).AddVariations("float16")