Verify non-optional tensors have values in CpuExecutor

This change adds additional validation for non-optional tensors for the
following operations:
* EMBEDDING_LOOKUP
* HASHTABLE_LOOKUP
* LSH_PROJECTION
* BIDIRECTIONAL_SEQUENCE_LSTM
* LSTM
* RANDOM_MULTINOMIAL
* RNN
* SVDF
* SPLIT

Some operations, such as SVDF, unpack scalar values without first
checking whether a value is present, leading to a failed CHECK. This CL
adds protections to fall back to default values in these cases, and
relies on the corresponding Prepare method to make such cases fail
validation.

Bug: 157516274
Test: mma
Test: CtsNNAPITestCases
Test: NeuralNetworksTest_static
Test: libneuralnetworks_fuzzer
Change-Id: I6bb804ec40205c9741b04231022894c714ad28ec
Merged-In: I6bb804ec40205c9741b04231022894c714ad28ec
(cherry picked from commit ac18d190b96c493961cf1d57e327806afbe319bb)
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 2673f2d..d8582ed 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -991,6 +991,9 @@
             }
         } break;
         case OperationType::EMBEDDING_LOOKUP: {
+            if (!allParametersPresent(2, 1)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
             const RunTimeOperandInfo& values = operands[ins[EmbeddingLookup::kValueTensor]];
             const RunTimeOperandInfo& lookups = operands[ins[EmbeddingLookup::kLookupTensor]];
             RunTimeOperandInfo& output = operands[outs[EmbeddingLookup::kOutputTensor]];
@@ -1002,6 +1005,9 @@
                       setInfoAndAllocateIfNeeded(&output, outputShape, &result) && lookup.Eval();
         } break;
         case OperationType::HASHTABLE_LOOKUP: {
+            if (!allParametersPresent(3, 2)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
             const RunTimeOperandInfo& lookups = operands[ins[HashtableLookup::kLookupTensor]];
             const RunTimeOperandInfo& keys = operands[ins[HashtableLookup::kKeyTensor]];
             const RunTimeOperandInfo& values = operands[ins[HashtableLookup::kValueTensor]];
@@ -1102,6 +1108,9 @@
                       setInfoAndAllocateIfNeeded(&output, outputShape, &result) && lstm_cell.Eval();
         } break;
         case OperationType::RANDOM_MULTINOMIAL: {
+            if (!allParametersPresent(3, 1)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
             const RunTimeOperandInfo& lookups = operands[ins[HashtableLookup::kLookupTensor]];
             const RunTimeOperandInfo& keys = operands[ins[HashtableLookup::kKeyTensor]];
             const RunTimeOperandInfo& values = operands[ins[HashtableLookup::kValueTensor]];
@@ -1115,6 +1124,10 @@
                       multinomial.Eval();
         } break;
         case OperationType::RNN: {
+            if (!allParametersPresent(6, 2)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+
             RunTimeOperandInfo& hiddenStateOut = operands[outs[RNN::kHiddenStateOutTensor]];
             RunTimeOperandInfo& output = operands[outs[RNN::kOutputTensor]];
 
@@ -1409,8 +1422,8 @@
                       expand_dims::eval(input.buffer, input.shape(), axis, output.buffer, outShape);
         } break;
         case OperationType::SPLIT: {
-            if (ins.size() != 3) {
-                LOG(ERROR) << "Wrong input count";
+            const size_t outCount = outs.size();
+            if (!allParametersPresent(3, outCount)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
 
diff --git a/common/include/CpuExecutor.h b/common/include/CpuExecutor.h
index a6bf74c..edb2332 100644
--- a/common/include/CpuExecutor.h
+++ b/common/include/CpuExecutor.h
@@ -250,6 +250,14 @@
     return data[0];
 }
 
+template <typename T>
+T getScalarDataWithDefault(const RunTimeOperandInfo& info, T defaultValue) {
+    if (info.length < sizeof(T)) {
+        return defaultValue;
+    }
+    return getScalarData<T>(info);
+}
+
 inline bool IsNullInput(const RunTimeOperandInfo* input) {
     return input->lifetime == hal::OperandLifeTime::NO_VALUE;
 }
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp
index d4d32b9..12ac43f 100644
--- a/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -169,19 +169,24 @@
     bw_cell_layer_norm_weights_ = GetInput(operation, operands, kBwCellLayerNormWeightsTensor);
     bw_output_layer_norm_weights_ = GetInput(operation, operands, kBwOutputLayerNormWeightsTensor);
 
-    params_.activation = static_cast<TfLiteFusedActivation>(
-            getScalarData<int32_t>(*GetInput(operation, operands, kActivationParam)));
+    const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+    params_.activation = static_cast<TfLiteFusedActivation>(getScalarDataWithDefault<int32_t>(
+            activationOperand, TfLiteFusedActivation::kTfLiteActNone));
+    const auto& clipOperand = *GetInput(operation, operands, kCellClipParam);
+    const auto& projOperand = *GetInput(operation, operands, kProjClipParam);
     if (input_->type == OperandType::TENSOR_FLOAT32) {
-        params_.cell_clip = getScalarData<float>(*GetInput(operation, operands, kCellClipParam));
-        params_.proj_clip = getScalarData<float>(*GetInput(operation, operands, kProjClipParam));
+        params_.cell_clip = getScalarDataWithDefault<float>(clipOperand, 0.0f);
+        params_.proj_clip = getScalarDataWithDefault<float>(projOperand, 0.0f);
     } else {
-        params_.cell_clip = static_cast<float>(
-                getScalarData<_Float16>(*GetInput(operation, operands, kCellClipParam)));
-        params_.proj_clip = static_cast<float>(
-                getScalarData<_Float16>(*GetInput(operation, operands, kProjClipParam)));
+        params_.cell_clip =
+                static_cast<float>(getScalarDataWithDefault<_Float16>(clipOperand, 0.0f));
+        params_.proj_clip =
+                static_cast<float>(getScalarDataWithDefault<_Float16>(projOperand, 0.0f));
     }
-    params_.merge_outputs = getScalarData<bool>(*GetInput(operation, operands, kMergeOutputsParam));
-    params_.time_major = getScalarData<bool>(*GetInput(operation, operands, kTimeMajorParam));
+    const auto& mergeOutputsOperand = *GetInput(operation, operands, kMergeOutputsParam);
+    params_.merge_outputs = getScalarDataWithDefault<bool>(mergeOutputsOperand, false);
+    const auto& timeMajorOperand = *GetInput(operation, operands, kTimeMajorParam);
+    params_.time_major = getScalarDataWithDefault<bool>(timeMajorOperand, false);
     params_.use_layer_norm = !IsNullInput(fw_input_layer_norm_weights_);
 
     fw_output_ = GetOutput(operation, operands, kFwOutputTensor);
@@ -205,6 +210,59 @@
                                         Shape* fwOutputShape, Shape* bwOutputShape,
                                         Shape* fwOutputActivationState, Shape* fwOutputCellState,
                                         Shape* bwOutputActivationState, Shape* bwOutputCellState) {
+    // Check we have all the inputs and outputs we need.
+    constexpr int requiredInputs[] = {
+            kInputTensor,
+            kFwInputToForgetWeightsTensor,
+            kFwInputToCellWeightsTensor,
+            kFwInputToOutputWeightsTensor,
+            kFwRecurrentToForgetWeightsTensor,
+            kFwRecurrentToCellWeightsTensor,
+            kFwRecurrentToOutputWeightsTensor,
+            kFwForgetGateBiasTensor,
+            kFwCellGateBiasTensor,
+            kFwOutputGateBiasTensor,
+            kBwInputToForgetWeightsTensor,
+            kBwInputToCellWeightsTensor,
+            kBwInputToOutputWeightsTensor,
+            kBwRecurrentToForgetWeightsTensor,
+            kBwRecurrentToCellWeightsTensor,
+            kBwRecurrentToOutputWeightsTensor,
+            kBwForgetGateBiasTensor,
+            kBwCellGateBiasTensor,
+            kBwOutputGateBiasTensor,
+            kFwInputActivationStateTensor,
+            kFwInputCellStateTensor,
+            kBwInputActivationStateTensor,
+            kBwInputCellStateTensor,
+            kActivationParam,
+            kCellClipParam,
+            kProjClipParam,
+            kMergeOutputsParam,
+            kTimeMajorParam,
+    };
+    for (const int requiredInput : requiredInputs) {
+        NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+                << "required input " << requiredInput << " is omitted";
+    }
+
+    // Check that the scalar operands' buffers are large enough.
+    const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+    NN_RET_CHECK(activationOperand.length >= sizeof(int32_t));
+    const auto& cellOperand = *GetInput(operation, operands, kCellClipParam);
+    const auto& projOperand = *GetInput(operation, operands, kProjClipParam);
+    if (input_->type == OperandType::TENSOR_FLOAT32) {
+        NN_RET_CHECK(cellOperand.length >= sizeof(float));
+        NN_RET_CHECK(projOperand.length >= sizeof(float));
+    } else {
+        NN_RET_CHECK(cellOperand.length >= sizeof(_Float16));
+        NN_RET_CHECK(projOperand.length >= sizeof(_Float16));
+    }
+    const auto& mergeOutputsOperand = *GetInput(operation, operands, kMergeOutputsParam);
+    NN_RET_CHECK(mergeOutputsOperand.length >= sizeof(bool));
+    const auto& timeMajorOperand = *GetInput(operation, operands, kTimeMajorParam);
+    NN_RET_CHECK(timeMajorOperand.length >= sizeof(bool));
+
     // Inferring batch size, number of outputs and number of cells from the
     // input tensors.
     NN_CHECK(NumDimensions(input_) == 3);
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index 9ca8be4..bdb106e 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -44,8 +44,12 @@
 
 bool LSHProjection::Prepare(const Operation& operation, RunTimeOperandInfo* operands,
                             Shape* outputShape) {
-    const int num_inputs = NumInputsWithValues(operation, operands);
-    NN_CHECK(num_inputs == 3 || num_inputs == 4);
+    // Check that none of the required inputs are omitted.
+    constexpr int requiredInputs[] = {kHashTensor, kInputTensor, kTypeParam};
+    for (const int requiredInput : requiredInputs) {
+        NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+                << "required input " << requiredInput << " is omitted";
+    }
     NN_CHECK_EQ(NumOutputs(operation), 1);
 
     const RunTimeOperandInfo* hash = GetInput(operation, operands, kHashTensor);
@@ -56,8 +60,9 @@
     const RunTimeOperandInfo* input = GetInput(operation, operands, kInputTensor);
     NN_CHECK(NumDimensions(input) >= 1);
 
-    auto type = static_cast<LSHProjectionType>(
-            getScalarData<int32_t>(operands[operation.inputs[kTypeParam]]));
+    const auto& typeOperand = operands[operation.inputs[kTypeParam]];
+    NN_RET_CHECK(typeOperand.length >= sizeof(int32_t));
+    auto type = static_cast<LSHProjectionType>(getScalarData<int32_t>(typeOperand));
     switch (type) {
         case LSHProjectionType_SPARSE:
         case LSHProjectionType_SPARSE_DEPRECATED:
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp
index 6020353..ba5d46a 100644
--- a/common/operations/LSTM.cpp
+++ b/common/operations/LSTM.cpp
@@ -83,16 +83,20 @@
     output_state_in_ = GetInput(operation, operands, kOutputStateInTensor);
     cell_state_in_ = GetInput(operation, operands, kCellStateInTensor);
 
-    params_.activation = static_cast<TfLiteFusedActivation>(
-            getScalarData<int32_t>(*GetInput(operation, operands, kActivationParam)));
+    const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+    params_.activation = static_cast<TfLiteFusedActivation>(getScalarDataWithDefault<int32_t>(
+            activationOperand, TfLiteFusedActivation::kTfLiteActNone));
+
+    const auto& cellClipOperand = *GetInput(operation, operands, kCellClipParam);
+    const auto& projClipOperand = *GetInput(operation, operands, kProjClipParam);
     if (input_->type == OperandType::TENSOR_FLOAT32) {
-        params_.cell_clip = getScalarData<float>(*GetInput(operation, operands, kCellClipParam));
-        params_.proj_clip = getScalarData<float>(*GetInput(operation, operands, kProjClipParam));
+        params_.cell_clip = getScalarDataWithDefault<float>(cellClipOperand, 0.0f);
+        params_.proj_clip = getScalarDataWithDefault<float>(projClipOperand, 0.0f);
     } else {
-        params_.cell_clip = static_cast<float>(
-                getScalarData<_Float16>(*GetInput(operation, operands, kCellClipParam)));
-        params_.proj_clip = static_cast<float>(
-                getScalarData<_Float16>(*GetInput(operation, operands, kProjClipParam)));
+        params_.cell_clip =
+                static_cast<float>(getScalarDataWithDefault<_Float16>(cellClipOperand, 0.0f));
+        params_.proj_clip =
+                static_cast<float>(getScalarDataWithDefault<_Float16>(projClipOperand, 0.0f));
     }
 
     // We check the version of LSTM by checking the number of the inputs to the
@@ -302,8 +306,42 @@
     // Check we have all the inputs and outputs we need.
     NN_CHECK(NumInputsWithValues(operation, operands) >= 15 &&
              NumInputsWithValues(operation, operands) <= 27);
+    constexpr int requiredInputs[] = {
+            kInputTensor,
+            kInputToForgetWeightsTensor,
+            kInputToCellWeightsTensor,
+            kInputToOutputWeightsTensor,
+            kRecurrentToForgetWeightsTensor,
+            kRecurrentToCellWeightsTensor,
+            kRecurrentToOutputWeightsTensor,
+            kForgetGateBiasTensor,
+            kCellGateBiasTensor,
+            kOutputGateBiasTensor,
+            kOutputStateInTensor,
+            kCellStateInTensor,
+            kActivationParam,
+            kCellClipParam,
+            kProjClipParam,
+    };
+    for (const int requiredInput : requiredInputs) {
+        NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+                << "required input " << requiredInput << " is omitted";
+    }
     NN_CHECK_EQ(NumOutputs(operation), 4);
 
+    // Check that the scalar operands' buffers are large enough.
+    const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+    NN_RET_CHECK(activationOperand.length >= sizeof(int32_t));
+    const auto& cellClipOperand = *GetInput(operation, operands, kCellClipParam);
+    const auto& projClipOperand = *GetInput(operation, operands, kProjClipParam);
+    if (input_->type == OperandType::TENSOR_FLOAT32) {
+        NN_RET_CHECK(cellClipOperand.length >= sizeof(float));
+        NN_RET_CHECK(projClipOperand.length >= sizeof(float));
+    } else {
+        NN_RET_CHECK(cellClipOperand.length >= sizeof(_Float16));
+        NN_RET_CHECK(projClipOperand.length >= sizeof(_Float16));
+    }
+
     // Inferring batch size, number of outputs and number of cells from the
     // input tensors.
     NN_CHECK(NumDimensions(input_) > 1);
diff --git a/common/operations/RNN.cpp b/common/operations/RNN.cpp
index dbff94f..259c091 100644
--- a/common/operations/RNN.cpp
+++ b/common/operations/RNN.cpp
@@ -51,7 +51,7 @@
     NNTRACE_TRANS("RNN::Prepare");
     // Check we have all the inputs and outputs we need.
     const int num_inputs = NumInputsWithValues(operation, operands);
-    NN_CHECK(num_inputs == 5 || num_inputs == 6);
+    NN_CHECK(num_inputs == 6);
     NN_CHECK_EQ(NumOutputs(operation), 2);
 
     const RunTimeOperandInfo* input = GetInput(operation, operands, kInputTensor);
diff --git a/common/operations/RNN.h b/common/operations/RNN.h
index e8e380a..245eb1d 100644
--- a/common/operations/RNN.h
+++ b/common/operations/RNN.h
@@ -37,7 +37,7 @@
     bool Eval();
 
     static constexpr int kInputTensor = 0;
-    static constexpr int kWeightsTensor = 1;  // Optional
+    static constexpr int kWeightsTensor = 1;
     static constexpr int kRecurrentWeightsTensor = 2;
     static constexpr int kBiasTensor = 3;
     static constexpr int kHiddenStateInTensor = 4;
diff --git a/common/operations/SVDF.cpp b/common/operations/SVDF.cpp
index f9fd558..12b91f4 100644
--- a/common/operations/SVDF.cpp
+++ b/common/operations/SVDF.cpp
@@ -38,9 +38,11 @@
     bias_ = GetInput(operation, operands, kBiasTensor);
     state_in_ = GetInput(operation, operands, kStateInTensor);
 
-    params_.rank_ = getScalarData<int>(*GetInput(operation, operands, kRankParam));
-    params_.activation_ = static_cast<TfLiteFusedActivation>(
-            getScalarData<int>(*GetInput(operation, operands, kActivationParam)));
+    const auto& rankOperand = *GetInput(operation, operands, kRankParam);
+    params_.rank_ = getScalarDataWithDefault<int>(rankOperand, 0);
+    const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+    params_.activation_ = static_cast<TfLiteFusedActivation>(getScalarDataWithDefault<int>(
+            activationOperand, TfLiteFusedActivation::kTfLiteActNone));
 
     state_out_ = GetOutput(operation, operands, kStateOutTensor);
     output_ = GetOutput(operation, operands, kOutputTensor);
@@ -53,8 +55,22 @@
     const int num_inputs = NumInputsWithValues(operation, operands);
 
     NN_CHECK(num_inputs == 6 || num_inputs == 7);
+    constexpr int requiredInputs[] = {
+            kInputTensor, kWeightsFeatureTensor, kWeightsTimeTensor, kStateInTensor,
+            kRankParam,   kActivationParam,
+    };
+    for (const int requiredInput : requiredInputs) {
+        NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+                << "required input " << requiredInput << " is omitted";
+    }
     NN_CHECK_EQ(NumOutputs(operation), 2);
 
+    // Check that the scalar operands' buffers are large enough.
+    const auto& rankOperand = *GetInput(operation, operands, kRankParam);
+    NN_RET_CHECK(rankOperand.length >= sizeof(int));
+    const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+    NN_RET_CHECK(activationOperand.length >= sizeof(int));
+
     const RunTimeOperandInfo* input = GetInput(operation, operands, SVDF::kInputTensor);
     const RunTimeOperandInfo* weights_feature =
             GetInput(operation, operands, SVDF::kWeightsFeatureTensor);