Remove -Wno-sign-compare cflag from NNAPI common/
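
The -Wsign-compare warnings that surface once the flag is removed are
fixed in place: unsigned literal suffixes (e.g. 4u) in checks against
dimension counts, size_t/uint32_t loop indices, explicit range checks
plus casts where a signed index is compared against a container size,
and pragma guards around third-party headers (gemmlowp, TFLite) that
still trigger the warning.

A minimal sketch of the signed-index pattern applied throughout (names
are illustrative only, not taken from the diff):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    bool indexInRange(int32_t index, const std::vector<uint32_t>& v) {
        // Reject negative values first, then cast so the comparison
        // against size() is unsigned-vs-unsigned.
        if (index < 0) return false;
        return static_cast<size_t>(index) < v.size();
    }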
Bug: N/A
Test: mma
Change-Id: Ia32cdae78ee0b30fccee477e19c4d43d6c044e01
diff --git a/common/AidlValidateHal.cpp b/common/AidlValidateHal.cpp
index 913d9dc..bd0dc8e 100644
--- a/common/AidlValidateHal.cpp
+++ b/common/AidlValidateHal.cpp
@@ -42,20 +42,22 @@
getModel,
std::set<AidlHalPreparedModelRole>* preparedModelRoles,
aidl_hal::Operand* combinedOperand) {
- NN_RET_CHECK(preparedModels.size() != 0);
- NN_RET_CHECK(inputRoles.size() != 0 || outputRoles.size() != 0);
+ NN_RET_CHECK(!preparedModels.empty());
+ NN_RET_CHECK(!inputRoles.empty() || !outputRoles.empty());
std::set<AidlHalPreparedModelRole> roles;
std::vector<aidl_hal::Operand> operands;
operands.reserve(inputRoles.size() + outputRoles.size());
for (const auto& role : inputRoles) {
- NN_RET_CHECK_LT(role.modelIndex, preparedModels.size());
+ NN_RET_CHECK_GE(role.modelIndex, 0);
+ NN_RET_CHECK_LT(static_cast<size_t>(role.modelIndex), preparedModels.size());
const auto& preparedModel = preparedModels[role.modelIndex];
NN_RET_CHECK(preparedModel != nullptr);
const auto* model = getModel(preparedModel);
NN_RET_CHECK(model != nullptr);
const auto& inputIndexes = model->main.inputIndexes;
- NN_RET_CHECK_LT(role.ioIndex, inputIndexes.size());
+ NN_RET_CHECK_GE(role.ioIndex, 0);
+ NN_RET_CHECK_LT(static_cast<size_t>(role.ioIndex), inputIndexes.size());
NN_RET_CHECK_GT(role.probability, 0.0f);
NN_RET_CHECK_LE(role.probability, 1.0f);
const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);
@@ -63,13 +65,15 @@
operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]);
}
for (const auto& role : outputRoles) {
- NN_RET_CHECK_LT(role.modelIndex, preparedModels.size());
+ NN_RET_CHECK_GE(role.modelIndex, 0);
+ NN_RET_CHECK_LT(static_cast<size_t>(role.modelIndex), preparedModels.size());
const auto& preparedModel = preparedModels[role.modelIndex];
NN_RET_CHECK(preparedModel != nullptr);
const auto* model = getModel(preparedModel);
NN_RET_CHECK(model != nullptr);
const auto& outputIndexes = model->main.outputIndexes;
- NN_RET_CHECK_LT(role.ioIndex, outputIndexes.size());
+ NN_RET_CHECK_GE(role.ioIndex, 0);
+ NN_RET_CHECK_LT(static_cast<size_t>(role.ioIndex), outputIndexes.size());
NN_RET_CHECK_GT(role.probability, 0.0f);
NN_RET_CHECK_LE(role.probability, 1.0f);
const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);
diff --git a/common/Android.bp b/common/Android.bp
index 4f86a0c..f82fef3 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -165,7 +165,6 @@
"-Wextra",
"-Wno-extern-c-compat",
"-Wno-invalid-partial-specialization",
- "-Wno-sign-compare",
],
}
@@ -290,7 +289,6 @@
"-Wno-array-bounds",
"-Wno-extern-c-compat",
"-Wno-invalid-partial-specialization",
- "-Wno-sign-compare",
],
}
@@ -364,7 +362,6 @@
"-Wno-array-bounds",
"-Wno-extern-c-compat",
"-Wno-invalid-partial-specialization",
- "-Wno-sign-compare",
],
}
@@ -426,7 +423,6 @@
"-Wno-array-bounds",
"-Wno-extern-c-compat",
"-Wno-invalid-partial-specialization",
- "-Wno-sign-compare",
],
}
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index d7fa254..13399e9 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -269,7 +269,7 @@
for (uint32_t i = 0; i < operation->inputs.size(); i++) {
if (isOmittedInput(i)) continue;
for (uint32_t j = 0; j < getInputInfo(i)->dimensions.size(); j++) {
- NN_RET_CHECK_NE(getInputInfo(i)->dimensions[j], 0)
+ NN_RET_CHECK_NE(getInputInfo(i)->dimensions[j], 0u)
<< operation->type << " does not support zero-sized tensor, but input " << i
<< " dimension " << j << " is 0.";
}
@@ -1375,7 +1375,7 @@
const int32_t axis = getScalarData<int32_t>(operands[ins[1]]);
const int32_t numOutputs = getScalarData<int32_t>(operands[ins[2]]);
- if (numOutputs != outs.size()) {
+ if (static_cast<size_t>(numOutputs) != outs.size()) {
return ANEURALNETWORKS_BAD_DATA;
}
diff --git a/common/ExecutionBurstController.cpp b/common/ExecutionBurstController.cpp
index ac49448..5b184e2 100644
--- a/common/ExecutionBurstController.cpp
+++ b/common/ExecutionBurstController.cpp
@@ -398,7 +398,10 @@
// get all memories
hardware::hidl_vec<hardware::hidl_memory> memories(slots.size());
std::transform(slots.begin(), slots.end(), memories.begin(), [this](int32_t slot) {
- return slot < mMemoryCache.size() ? mMemoryCache[slot] : hardware::hidl_memory{};
+ if (slot < 0 || static_cast<size_t>(slot) >= mMemoryCache.size()) {
+ return hardware::hidl_memory{};
+ }
+ return mMemoryCache[slot];
});
// ensure all memories are valid
diff --git a/common/IndexedShapeWrapper.cpp b/common/IndexedShapeWrapper.cpp
index 675518c..1bab9df 100644
--- a/common/IndexedShapeWrapper.cpp
+++ b/common/IndexedShapeWrapper.cpp
@@ -37,7 +37,7 @@
NN_CHECK(isValid(*index));
bool anyIndicesLeft = false;
- for (int i = 0; i < index->size(); ++i) {
+ for (size_t i = 0; i < index->size(); ++i) {
if (index->at(i) < shape->dimensions[i] - 1) {
anyIndicesLeft = true;
break;
@@ -63,7 +63,7 @@
NN_CHECK(isValid(index));
*flatIndex = 0;
- for (int i = 0; i < index.size(); ++i) {
+ for (size_t i = 0; i < index.size(); ++i) {
*flatIndex += strides[i] * index[i];
}
return true;
@@ -74,7 +74,7 @@
NN_CHECK(index.size() >= strides.size());
*flatIndex = 0;
- for (int i = 1; i <= strides.size(); ++i) {
+ for (size_t i = 1; i <= strides.size(); ++i) {
uint32_t currentIndex = index[index.size() - i];
uint32_t currentDimSize = shape->dimensions[shape->dimensions.size() - i];
NN_CHECK(currentIndex < currentDimSize || currentDimSize == 1);
@@ -92,7 +92,7 @@
<< toString(shape->dimensions);
return false;
}
- for (int i = 0; i < index.size(); ++i) {
+ for (size_t i = 0; i < index.size(); ++i) {
if (index[i] >= shape->dimensions[i]) {
LOG(ERROR) << "Invalid index: " << toString(index)
<< " is out of range for shape: " << toString(shape->dimensions);
diff --git a/common/QuantUtils.h b/common/QuantUtils.h
index f6bfd16..4296588 100644
--- a/common/QuantUtils.h
+++ b/common/QuantUtils.h
@@ -5,7 +5,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_QUANTUTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_QUANTUTILS_H
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <public/gemmlowp.h>
+#pragma clang diagnostic pop
#include <limits>
#include <memory>
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp
index b88c5da..a4c82af 100644
--- a/common/ValidateHal.cpp
+++ b/common/ValidateHal.cpp
@@ -136,7 +136,7 @@
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a wrong-sized scales, "
<< "expected " << expected << " was " << channelQuant.scales.size();
- NN_RET_CHECK_NE(expected, 0)
+ NN_RET_CHECK_NE(expected, 0u)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " channel dimension "
<< channelQuant.channelDim << " is underspecified (can't be 0)";
@@ -788,7 +788,7 @@
// extension operand type.
if (!isExtensionOperandType(operand.type) &&
!nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type))) {
- NN_RET_CHECK_GT(modelRank, 0)
+ NN_RET_CHECK_GT(modelRank, 0u)
<< "Model " << type << " " << requestArgumentIndex
<< " has unknown rank but the request does not specify the rank.";
}
diff --git a/common/include/CpuOperationUtils.h b/common/include/CpuOperationUtils.h
index ff58ff1..d69d553 100644
--- a/common/include/CpuOperationUtils.h
+++ b/common/include/CpuOperationUtils.h
@@ -61,14 +61,14 @@
inline void convertFloat16ToFloat32(const _Float16* input, std::vector<float>* output) {
CHECK(input != nullptr);
CHECK(output != nullptr);
- for (int i = 0; i < output->size(); ++i) {
+ for (size_t i = 0; i < output->size(); ++i) {
(*output)[i] = static_cast<float>(input[i]);
}
}
inline void convertFloat32ToFloat16(const std::vector<float>& input, _Float16* output) {
CHECK(output != nullptr);
- for (int i = 0; i < input.size(); ++i) {
+ for (size_t i = 0; i < input.size(); ++i) {
output[i] = input[i];
}
}
@@ -78,7 +78,7 @@
inline void convertInt8ToUInt8(const int8_t* input, std::vector<uint8_t>* output) {
CHECK(input != nullptr);
CHECK(output != nullptr);
- for (int i = 0; i < output->size(); ++i) {
+ for (size_t i = 0; i < output->size(); ++i) {
(*output)[i] = static_cast<uint8_t>(static_cast<int32_t>(input[i]) + 128);
}
}
@@ -87,7 +87,7 @@
// and the distance between offsets is 128.
inline void convertUInt8ToInt8(const std::vector<uint8_t>& input, int8_t* output) {
CHECK(output != nullptr);
- for (int i = 0; i < input.size(); ++i) {
+ for (size_t i = 0; i < input.size(); ++i) {
output[i] = static_cast<int8_t>(static_cast<int32_t>(input[i]) - 128);
}
}
@@ -97,7 +97,7 @@
std::vector<float>* output) {
CHECK(input != nullptr);
CHECK(output != nullptr);
- for (int i = 0; i < output->size(); ++i) {
+ for (size_t i = 0; i < output->size(); ++i) {
(*output)[i] = (static_cast<float>(input[i]) - zeroPoint) * scale;
}
}
@@ -106,7 +106,7 @@
inline void convertFloat32ToQuant(const std::vector<float>& input, float scale, int32_t zeroPoint,
T* output) {
CHECK(output != nullptr);
- for (int i = 0; i < input.size(); ++i) {
+ for (size_t i = 0; i < input.size(); ++i) {
int32_t intVal = std::round(input[i] / scale + zeroPoint);
intVal = std::min<int32_t>(std::max<int32_t>(intVal, std::numeric_limits<T>::min()),
std::numeric_limits<T>::max());
@@ -117,7 +117,7 @@
template <typename T>
inline bool convertNchwToNhwc(const T* nchw, const Shape& nchwShape, std::vector<T>* nhwc,
Shape* nhwcShape) {
- NN_RET_CHECK_EQ(getNumberOfDimensions(nchwShape), 4)
+ NN_RET_CHECK_EQ(getNumberOfDimensions(nchwShape), 4u)
<< "Error converting a non-4-D tensor to NHWC layout";
*nhwcShape = nchwShape;
const auto& fromDim = nchwShape.dimensions;
@@ -138,7 +138,7 @@
template <typename T>
inline bool convertNhwcToNchw(const std::vector<T>& nhwc, const Shape& nhwcShape, T* nchw) {
- NN_RET_CHECK_EQ(getNumberOfDimensions(nhwcShape), 4)
+ NN_RET_CHECK_EQ(getNumberOfDimensions(nhwcShape), 4u)
<< "Error converting a non-4-D tensor to NCHW layout";
const auto& fromDim = nhwcShape.dimensions;
const auto from = nhwc.data();
@@ -184,7 +184,7 @@
OutputWithLayout(bool useNchw) : mDataOriginal(nullptr), mUseNchw(useNchw) {}
bool initialize(T* data, const Shape& shape) {
- NN_RET_CHECK_EQ(getNumberOfDimensions(shape), 4);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(shape), 4u);
mDataOriginal = data;
mShape = shape;
if (mUseNchw) {
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index 6a9cd82..6b19bf2 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -28,6 +28,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h>
@@ -385,7 +386,7 @@
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
NN_RET_CHECK(validateInputTypes(context, {inputType}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
@@ -413,7 +414,7 @@
bool prepare(OperationType opType, IOperationExecutionContext* context) {
Shape input = context->getInputShape(kInputTensor);
if (opType != OperationType::HARD_SWISH) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
Shape output = input;
if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp
index 6be67b0..123e832 100644
--- a/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -270,16 +270,16 @@
const uint32_t n_fw_input = SizeOfDimension(input_, 2);
const uint32_t n_fw_cell = SizeOfDimension(fw_input_to_output_weights_, 0);
- NN_CHECK_EQ(NumDimensions(fw_input_to_output_weights_), 2);
+ NN_CHECK_EQ(NumDimensions(fw_input_to_output_weights_), 2u);
NN_CHECK_EQ(SizeOfDimension(fw_input_to_output_weights_, 1), n_fw_input);
- NN_CHECK_EQ(NumDimensions(fw_recurrent_to_output_weights_), 2);
+ NN_CHECK_EQ(NumDimensions(fw_recurrent_to_output_weights_), 2u);
NN_CHECK_EQ(SizeOfDimension(fw_recurrent_to_output_weights_, 0), n_fw_cell);
const uint32_t n_fw_output = SizeOfDimension(fw_recurrent_to_output_weights_, 1);
const uint32_t n_bw_cell = SizeOfDimension(bw_input_to_output_weights_, 0);
- NN_CHECK_EQ(NumDimensions(bw_recurrent_to_output_weights_), 2);
+ NN_CHECK_EQ(NumDimensions(bw_recurrent_to_output_weights_), 2u);
NN_CHECK_EQ(SizeOfDimension(bw_recurrent_to_output_weights_, 0), n_bw_cell);
const uint32_t n_bw_output = SizeOfDimension(bw_recurrent_to_output_weights_, 1);
@@ -338,42 +338,42 @@
}
if (has_fw_aux_weights) {
- int n_aux_input = SizeOfDimension(input_, 2);
+ uint32_t n_aux_input = SizeOfDimension(input_, 2);
// Check forward auxiliary input shapes
{
- NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_input_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_input_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_input_weights_, 0), n_fw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_input_weights_, 1), n_aux_input);
- NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_forget_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_forget_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_forget_weights_, 0), n_fw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_forget_weights_, 1), n_aux_input);
- NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_cell_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_cell_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_cell_weights_, 0), n_fw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_cell_weights_, 1), n_aux_input);
- NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_output_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(fw_aux_input_to_output_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_output_weights_, 0), n_fw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(fw_aux_input_to_output_weights_, 1), n_aux_input);
}
// Check backward auxiliary input shapes
{
- NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_input_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_input_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_input_weights_, 0), n_bw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_input_weights_, 1), n_aux_input);
- NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_forget_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_forget_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_forget_weights_, 0), n_bw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_forget_weights_, 1), n_aux_input);
- NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_cell_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_cell_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_cell_weights_, 0), n_bw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_cell_weights_, 1), n_aux_input);
- NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_output_weights_), 2);
+ NN_RET_CHECK_EQ(NumDimensions(bw_aux_input_to_output_weights_), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_output_weights_, 0), n_bw_cell);
NN_RET_CHECK_EQ(SizeOfDimension(bw_aux_input_to_output_weights_, 1), n_aux_input);
}
diff --git a/common/operations/BidirectionalSequenceRNN.cpp b/common/operations/BidirectionalSequenceRNN.cpp
index d95f694..10071a6 100644
--- a/common/operations/BidirectionalSequenceRNN.cpp
+++ b/common/operations/BidirectionalSequenceRNN.cpp
@@ -66,9 +66,9 @@
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
const uint32_t secondDimSize = getSizeOfDimension(inputShape, 1);
const uint32_t inputSize = getSizeOfDimension(inputShape, 2);
- for (int f = 0; f < firstDimSize; ++f) {
- for (int s = 0; s < secondDimSize; ++s) {
- for (int i = 0; i < inputSize; ++i) {
+ for (uint32_t f = 0; f < firstDimSize; ++f) {
+ for (uint32_t s = 0; s < secondDimSize; ++s) {
+ for (uint32_t i = 0; i < inputSize; ++i) {
const uint32_t inputIndex = f * secondDimSize * inputSize + s * inputSize + i;
const uint32_t outputIndex = s * firstDimSize * inputSize + f * inputSize + i;
output[outputIndex] = input[inputIndex];
@@ -80,7 +80,7 @@
Shape removeFirstDim(const Shape& input) {
Shape output = input;
output.dimensions.resize(input.dimensions.size() - 1);
- for (int i = 0; i < input.dimensions.size() - 1; ++i) {
+ for (size_t i = 0; i < input.dimensions.size() - 1; ++i) {
output.dimensions[i] = input.dimensions[i + 1];
}
return output;
@@ -253,7 +253,7 @@
}
// Forward pass
- for (int i = 0; i < maxTime; ++i) {
+ for (uint32_t i = 0; i < maxTime; ++i) {
const T* inputBatchPtr = input + i * batchSize * inputSize;
const T* auxInputBatchPtr = nullptr;
if (hasAuxWeights) {
@@ -390,15 +390,15 @@
const uint32_t bwNumUnits = getSizeOfDimension(bwWeights, 0);
const uint32_t inputSize = getSizeOfDimension(input, 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 3);
- NN_RET_CHECK_EQ(getNumberOfDimensions(fwWeights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(fwRecurrentWeights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(fwBias), 1);
- NN_RET_CHECK_EQ(getNumberOfDimensions(fwHiddenState), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bwWeights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bwRecurrentWeights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bwBias), 1);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bwHiddenState), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 3u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(fwWeights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(fwRecurrentWeights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(fwBias), 1u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(fwHiddenState), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bwWeights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bwRecurrentWeights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bwBias), 1u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bwHiddenState), 2u);
NN_RET_CHECK_EQ(inputSize, getSizeOfDimension(fwWeights, 1));
NN_RET_CHECK_EQ(fwNumUnits, getSizeOfDimension(fwBias, 0));
@@ -417,9 +417,9 @@
NN_RET_CHECK_EQ(bwNumUnits, getSizeOfDimension(bwHiddenState, 1));
if (linkingMode == LinkingMode::CROSS_LINKING) {
- NN_RET_CHECK_EQ(getNumberOfDimensions(auxInput), 3);
- NN_RET_CHECK_EQ(getNumberOfDimensions(fwAuxWeights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bwAuxWeights), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(auxInput), 3u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(fwAuxWeights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bwAuxWeights), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(auxInput, 0), getSizeOfDimension(input, 0));
NN_RET_CHECK_EQ(getSizeOfDimension(auxInput, 1), getSizeOfDimension(input, 1));
@@ -428,7 +428,7 @@
NN_RET_CHECK_EQ(getSizeOfDimension(bwAuxWeights, 0), bwNumUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(bwAuxWeights, 1), getSizeOfDimension(auxInput, 2));
} else if (linkingMode == LinkingMode::PARALLEL_LINKING) {
- NN_RET_CHECK_EQ(getNumberOfDimensions(auxInput), 3);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(auxInput), 3u);
NN_RET_CHECK_EQ(getSizeOfDimension(auxInput, 0), getSizeOfDimension(input, 0));
NN_RET_CHECK_EQ(getSizeOfDimension(auxInput, 1), getSizeOfDimension(input, 1));
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp
index 5a29023..0129e53 100644
--- a/common/operations/Broadcast.cpp
+++ b/common/operations/Broadcast.cpp
@@ -30,6 +30,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/integer_ops/add.h>
#include <tensorflow/lite/kernels/internal/optimized/integer_ops/mul.h>
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -479,8 +480,8 @@
}
if (hasKnownRank(input1) && hasKnownRank(input2)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input1), 4);
- NN_RET_CHECK_LE(getNumberOfDimensions(input2), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input1), 4u);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input2), 4u);
}
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, OperandType::INT32}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
@@ -492,8 +493,8 @@
Shape input1 = context->getInputShape(kInputTensor1);
Shape input2 = context->getInputShape(kInputTensor2);
Shape output = context->getOutputShape(kOutputTensor);
- NN_RET_CHECK_LE(getNumberOfDimensions(input1), 4);
- NN_RET_CHECK_LE(getNumberOfDimensions(input2), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input1), 4u);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input2), 4u);
NN_RET_CHECK(calculateBroadcastedShape(input1, input2, &output));
return context->setOutputShape(kOutputTensor, output);
}
diff --git a/common/operations/ChannelShuffle.cpp b/common/operations/ChannelShuffle.cpp
index efa0873..1460d5a 100644
--- a/common/operations/ChannelShuffle.cpp
+++ b/common/operations/ChannelShuffle.cpp
@@ -68,7 +68,7 @@
<< "Unsupported tensor type for operation " << kOperationName;
const Shape& inputShape = context->getInputShape(kInputTensor);
if (hasKnownRank(inputShape)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(inputShape), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(inputShape), 4u);
}
NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32, OperandType::INT32}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp
index 2ef51bf..58d4e20 100644
--- a/common/operations/Concatenation.cpp
+++ b/common/operations/Concatenation.cpp
@@ -28,6 +28,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
@@ -115,7 +116,7 @@
inline bool concatenation<int8_t>(IOperationExecutionContext* context) {
uint32_t inputCount = context->getNumInputs() - 1;
std::vector<std::vector<uint8_t>> inputs_uint8(inputCount);
- for (int i = 0; i < inputCount; ++i) {
+ for (uint32_t i = 0; i < inputCount; ++i) {
const auto currentSize = getNumberOfElements(context->getInputShape(i));
inputs_uint8[i].resize(currentSize);
if (currentSize != 0) {
@@ -146,7 +147,7 @@
Result<Version> validate(const IOperationValidationContext* context) {
uint32_t inputCount = context->getNumInputs();
- NN_RET_CHECK_GE(inputCount, 2);
+ NN_RET_CHECK_GE(inputCount, 2u);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
const OperandType inputType = context->getInputType(0);
auto minSupportedVersion = Version::ANDROID_OC_MR1;
@@ -173,7 +174,7 @@
for (uint32_t i = 0; i < inputCount - 1; ++i) {
const uint32_t inputRank = getNumberOfDimensions(context->getInputShape(i));
if (inputRank != 0) {
- NN_RET_CHECK_LE(inputRank, 4);
+ NN_RET_CHECK_LE(inputRank, 4u);
}
}
NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));
@@ -184,13 +185,13 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
uint32_t numInputs = context->getNumInputs();
- NN_RET_CHECK_GE(numInputs, 2);
+ NN_RET_CHECK_GE(numInputs, 2u);
const Shape& input0 = context->getInputShape(0);
uint32_t numDimensions = getNumberOfDimensions(input0);
int32_t axis = context->getInputValue<int32_t>(numInputs - 1);
NN_RET_CHECK_GE(axis, 0);
- NN_RET_CHECK_LT(axis, numDimensions);
- NN_RET_CHECK_LE(numDimensions, 4);
+ NN_RET_CHECK_LT(static_cast<uint32_t>(axis), numDimensions);
+ NN_RET_CHECK_LE(numDimensions, 4u);
uint32_t sumAxis = getSizeOfDimension(input0, axis);
for (uint32_t i = 1; i < numInputs - 1; ++i) {
@@ -198,7 +199,7 @@
NN_RET_CHECK_EQ(getNumberOfDimensions(input), numDimensions);
NN_RET_CHECK(input.type == input0.type);
for (uint32_t d = 0; d < numDimensions; ++d) {
- if (d == axis) {
+ if (d == static_cast<uint32_t>(axis)) {
sumAxis += getSizeOfDimension(input, axis);
} else {
NN_RET_CHECK_EQ(getSizeOfDimension(input0, d), getSizeOfDimension(input, d));
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index b2acc9d..b8b373a 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -30,6 +30,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h>
#include <tensorflow/lite/kernels/internal/types.h>
@@ -380,7 +381,7 @@
auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
auto outputShift = std::vector<int32_t>(outputDepth, .0f);
- for (int i = 0; i < outputDepth; ++i) {
+ for (uint32_t i = 0; i < outputDepth; ++i) {
Shape filterChannelShape = filterShape;
filterChannelShape.scale = filterScales[i];
Shape biasChannelShape = biasShape;
@@ -473,7 +474,7 @@
auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
auto outputShift = std::vector<int32_t>(outputDepth, .0f);
- for (int i = 0; i < outputDepth; ++i) {
+ for (uint32_t i = 0; i < outputDepth; ++i) {
Shape filterChannelShape = filterShape;
filterChannelShape.scale = filterScales[i];
Shape biasChannelShape = biasShape;
@@ -542,10 +543,10 @@
const auto inputRank = getNumberOfDimensions(context->getInputShape(kInputTensor));
const auto filterRank = getNumberOfDimensions(context->getInputShape(kFilterTensor));
if (inputRank != 0) {
- NN_RET_CHECK_EQ(inputRank, 4);
+ NN_RET_CHECK_EQ(inputRank, 4u);
}
if (filterRank != 0) {
- NN_RET_CHECK_EQ(filterRank, 4);
+ NN_RET_CHECK_EQ(filterRank, 4u);
}
auto inputCount = context->getNumInputs();
auto inputType = context->getInputType(kInputTensor);
@@ -574,7 +575,7 @@
NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
context->getInputExtraParams(kFilterTensor))
.channelDim,
- 0)
+ 0u)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
}
@@ -606,14 +607,14 @@
withExplicitPadding = true;
}
int inputOffset = withExplicitPadding ? 3 : 0;
- if (inputCount >= 8 + inputOffset) {
+ if (inputCount >= 8u + inputOffset) {
inExpectedTypes.push_back(OperandType::BOOL);
withLayout = true;
}
- NN_RET_CHECK_NE(inputCount, 9 + inputOffset)
+ NN_RET_CHECK_NE(inputCount, 9u + inputOffset)
<< "Provided only one dilation factor value, two values are required for operation "
<< kOperationName;
- if (inputCount == 10 + inputOffset) {
+ if (inputCount == 10u + inputOffset) {
inExpectedTypes.push_back(OperandType::INT32);
inExpectedTypes.push_back(OperandType::INT32);
withDilation = true;
@@ -653,9 +654,9 @@
} else {
NN_RET_CHECK(input.type == bias.type);
}
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1u);
Conv2dParam param;
NN_RET_CHECK(param.initialize(context));
@@ -670,10 +671,10 @@
// Only batches can be zero.
NN_RET_CHECK_EQ(channels_in, getSizeOfDimension(filter, 3));
NN_RET_CHECK_EQ(channels_out, getSizeOfDimension(bias, 0));
- NN_RET_CHECK_GT(height, 0);
- NN_RET_CHECK_GT(width, 0);
- NN_RET_CHECK_GT(channels_in, 0);
- NN_RET_CHECK_GT(channels_out, 0);
+ NN_RET_CHECK_GT(height, 0u);
+ NN_RET_CHECK_GT(width, 0u);
+ NN_RET_CHECK_GT(channels_in, 0u);
+ NN_RET_CHECK_GT(channels_out, 0u);
int32_t effectiveFilterWidth = (filterWidth - 1) * param.dilation_width_factor + 1;
int32_t effectiveFilterHeight = (filterHeight - 1) * param.dilation_height_factor + 1;
diff --git a/common/operations/Densify.cpp b/common/operations/Densify.cpp
index 20f779f..cb33661 100644
--- a/common/operations/Densify.cpp
+++ b/common/operations/Densify.cpp
@@ -64,14 +64,14 @@
}
template <typename T>
-void populate(const T* srcData, std::vector<int32_t>* indices, int32_t level, int32_t prevIdx,
+void populate(const T* srcData, std::vector<int32_t>* indices, uint32_t level, uint32_t prevIdx,
T* destData, const std::vector<uint32_t>& destDims,
const std::vector<int32_t>& dimFormat, const int32_t* traversalOrder,
const std::vector<int32_t>& blockSize, const int32_t* blockMap,
const std::vector<std::vector<int32_t>>& dimMetadata, const int origRank) {
if (level == (*indices).size()) { // level == size of traversal order
std::vector<int> origIdx(origRank);
- int i = 0;
+ size_t i = 0;
// Calculating origIdx using dense tensor dimensions
for (; i < origIdx.size(); i++) {
int origDim = traversalOrder[i];
@@ -160,7 +160,7 @@
const int origRank = destShape.dimensions.size();
std::vector<int32_t> blockSize(
inputShapes[kInputBlockMap].dimensions.front()); // size of block map
- for (int i = 0; i < inputShapes[kInputBlockMap].dimensions.front(); i++) {
+ for (uint32_t i = 0; i < inputShapes[kInputBlockMap].dimensions.front(); i++) {
const int32_t origDim = traversalOrder[origRank + i];
blockSize[i] = dimensions[origDim];
}
@@ -178,13 +178,13 @@
}
T* destData = context->getOutputBuffer<T>(kOutputTensor);
- for (int32_t i = 0; i < denseTotal; i++) {
+ for (size_t i = 0; i < denseTotal; i++) {
destData[i] = zeroPoint;
}
std::vector<int32_t> indices(
inputShapes[kInputTravOrder].dimensions.front()); // size of traversal order
- populate(srcData, &indices, 0, 0, destData, destShape.dimensions, dimFormat, traversalOrder,
+ populate(srcData, &indices, 0u, 0u, destData, destShape.dimensions, dimFormat, traversalOrder,
blockSize, blockMap, dimMetadata, origRank);
return true;
}
@@ -196,9 +196,9 @@
NN_RET_CHECK_EQ(inputCount,
kMinNumInputs + context->getInputShape(kInputTravOrder).dimensions.front() * 2);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
- NN_RET_CHECK_EQ(context->getInputShape(kInputTensor).dimensions.size(), 1);
+ NN_RET_CHECK_EQ(context->getInputShape(kInputTensor).dimensions.size(), 1u);
for (uint32_t i = 1; i < inputCount; i++) {
- NN_RET_CHECK_EQ(context->getInputShape(i).dimensions.size(), 1);
+ NN_RET_CHECK_EQ(context->getInputShape(i).dimensions.size(), 1u);
NN_RET_CHECK_EQ(context->getInputType(i), OperandType::TENSOR_INT32);
}
return Version::EXPERIMENTAL;
@@ -216,7 +216,7 @@
const uint32_t origRank = dimensionsShape.dimensions.front() - blockMapShape.dimensions.front();
std::vector<uint32_t> destDims(origRank);
- int i = 0;
+ size_t i = 0;
for (; i < destDims.size(); i++) {
const int32_t origDim = traversalOrder[i];
destDims[origDim] = dimensions[i];
diff --git a/common/operations/Densify.h b/common/operations/Densify.h
index 8ffed34..9d8c620 100644
--- a/common/operations/Densify.h
+++ b/common/operations/Densify.h
@@ -66,7 +66,7 @@
* * origRank = the size of denseShape. Used for calculating flattened index of indices.
*/
template <typename T>
-void populate(const T* srcData, std::vector<int32_t>* indices, int32_t level, int32_t prevIdx,
+void populate(const T* srcData, std::vector<int32_t>* indices, uint32_t level, uint32_t prevIdx,
T* destData, const std::vector<uint32_t>& denseShape,
const std::vector<int32_t>& dimFormat, const int32_t* traversalOrder,
const std::vector<int32_t>& blockSize, const int32_t* blockMap,
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 4b9ce39..aa27afc 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -307,7 +307,7 @@
auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
auto outputShift = std::vector<int32_t>(outputDepth, .0f);
- for (int i = 0; i < outputDepth; ++i) {
+ for (uint32_t i = 0; i < outputDepth; ++i) {
Shape filterChannelShape = filterShape;
filterChannelShape.scale = filterScales[i];
Shape biasChannelShape = biasShape;
@@ -329,7 +329,7 @@
for (uint32_t h = 0; h < outputHeight; h++) {
for (uint32_t w = 0; w < outputWidth; w++) {
for (uint32_t ic = 0; ic < inputDepth; ic++) {
- for (uint32_t m = 0; m < depthMultiplier; m++) {
+ for (int32_t m = 0; m < depthMultiplier; m++) {
int32_t wInputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
int32_t hInputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;
const int oc = m + ic * depthMultiplier;
@@ -452,7 +452,7 @@
NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
context->getInputExtraParams(kFilterTensor))
.channelDim,
- 3)
+ 3u)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
}
@@ -489,14 +489,14 @@
withExplicitPadding = true;
}
int inputOffset = withExplicitPadding ? 3 : 0;
- if (numInputs >= 9 + inputOffset) {
+ if (numInputs >= 9u + inputOffset) {
inExpectedTypes.push_back(OperandType::BOOL);
withLayout = true;
}
- NN_RET_CHECK_NE(numInputs, 10 + inputOffset)
+ NN_RET_CHECK_NE(numInputs, 10u + inputOffset)
<< "Provided only one dilation factor value, two values are required for operation "
<< kOperationName;
- if (numInputs == 11 + inputOffset) {
+ if (numInputs == 11u + inputOffset) {
inExpectedTypes.push_back(OperandType::INT32);
inExpectedTypes.push_back(OperandType::INT32);
withDilation = true;
@@ -536,10 +536,10 @@
} else {
NN_RET_CHECK(input.type == bias.type);
}
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);
- NN_RET_CHECK_EQ(getSizeOfDimension(filter, 0), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1u);
+ NN_RET_CHECK_EQ(getSizeOfDimension(filter, 0), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(filter, 3), getSizeOfDimension(bias, 0));
DepthwiseConv2dParam param;
diff --git a/common/operations/Dequantize.cpp b/common/operations/Dequantize.cpp
index 931bcc6..ebd8045 100644
--- a/common/operations/Dequantize.cpp
+++ b/common/operations/Dequantize.cpp
@@ -84,7 +84,7 @@
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM &&
@@ -105,7 +105,7 @@
bool prepare(IOperationExecutionContext* context) {
const Shape& input = context->getInputShape(kInputTensor);
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
Shape output = context->getOutputShape(kOutputTensor);
output.dimensions = input.dimensions;
return context->setOutputShape(kOutputTensor, output);
diff --git a/common/operations/Elementwise.cpp b/common/operations/Elementwise.cpp
index 8510003..965e4da 100644
--- a/common/operations/Elementwise.cpp
+++ b/common/operations/Elementwise.cpp
@@ -119,7 +119,7 @@
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
return inputType == OperandType::TENSOR_FLOAT16 ? Version::ANDROID_Q : Version::ANDROID_OC_MR1;
@@ -135,7 +135,7 @@
bool prepareFloor(IOperationExecutionContext* context) {
Shape input = context->getInputShape(kInputTensor);
Shape output = context->getOutputShape(kOutputTensor);
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
NN_RET_CHECK(SetShape(input, &output));
return context->setOutputShape(kOutputTensor, output);
}
diff --git a/common/operations/Fill.cpp b/common/operations/Fill.cpp
index 1689d4c..1137c0d 100644
--- a/common/operations/Fill.cpp
+++ b/common/operations/Fill.cpp
@@ -82,12 +82,12 @@
bool prepare(IOperationExecutionContext* context) {
Shape dimsShape = context->getInputShape(kDimsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(dimsShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(dimsShape), 1u);
Shape outputShape = context->getOutputShape(kOutputTensor);
outputShape.dimensions.resize(dimsShape.dimensions[0]);
const int32_t* dims = context->getInputBuffer<int32_t>(kDimsTensor);
- for (int i = 0; i < dimsShape.dimensions[0]; ++i) {
+ for (uint32_t i = 0; i < dimsShape.dimensions[0]; ++i) {
outputShape.dimensions[i] = dims[i];
}
return context->setOutputShape(kOutputTensor, outputShape);
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index 25838b6..639cffa 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -24,6 +24,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h>
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
@@ -198,14 +199,14 @@
}
// The Tensorflow fully connected layer specification says that input should
// be of at least rank 2, so we check. Tflite doesn't check.
- NN_RET_CHECK_GE(getNumberOfDimensions(input), 2);
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);
+ NN_RET_CHECK_GE(getNumberOfDimensions(input), 2u);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1u);
uint32_t input_n_elements = getNumberOfElements(input);
- uint32_t num_units = getSizeOfDimension(weights, 0);
- uint32_t input_size = getSizeOfDimension(weights, 1);
- uint32_t bias_len = getSizeOfDimension(bias, 0);
+ uint32_t num_units = getSizeOfDimension(weights, 0u);
+ uint32_t input_size = getSizeOfDimension(weights, 1u);
+ uint32_t bias_len = getSizeOfDimension(bias, 0u);
uint32_t batch_size = input_size == 0 ? 0 : input_n_elements / input_size;
if (batch_size != 0) {
NN_RET_CHECK_EQ(input_size * batch_size, input_n_elements);
@@ -215,8 +216,8 @@
}
if (output != nullptr) {
// Only batch_size can be 0.
- NN_RET_CHECK_GT(num_units, 0);
- NN_RET_CHECK_GT(input_size, 0);
+ NN_RET_CHECK_GT(num_units, 0u);
+ NN_RET_CHECK_GT(input_size, 0u);
output->type = input.type;
output->dimensions = {batch_size, num_units};
}
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp
index ec91510..d6f2a04 100644
--- a/common/operations/GenerateProposals.cpp
+++ b/common/operations/GenerateProposals.cpp
@@ -77,7 +77,7 @@
// Check for malformed data
// 1. Invalid batch id
// 2. Invalid region: x2 < x1 || y2 < y1
- NN_RET_CHECK_GE(batchIndex, 0);
+ NN_RET_CHECK_GE(batchIndex, 0u);
NN_RET_CHECK_LT(batchIndex, numBatches);
NN_RET_CHECK_LE(roiBase[0], roiBase[2]);
NN_RET_CHECK_LE(roiBase[1], roiBase[3]);
@@ -234,23 +234,23 @@
Shape imageInfoShape = context->getInputShape(kImageInfoTensor);
Shape outputShape = context->getOutputShape(kOutputTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bboxDeltasShape), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(batchesShape), 1);
- NN_RET_CHECK_EQ(getNumberOfDimensions(imageInfoShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bboxDeltasShape), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(batchesShape), 1u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(imageInfoShape), 2u);
// Only numRois can be zero.
const uint32_t kRoiDim = 4;
uint32_t numRois = getSizeOfDimension(roiShape, 0);
uint32_t numClasses = getSizeOfDimension(bboxDeltasShape, 1) / kRoiDim;
uint32_t numBatches = getSizeOfDimension(imageInfoShape, 0);
- NN_RET_CHECK_GT(numClasses, 0);
- NN_RET_CHECK_GT(numBatches, 0);
+ NN_RET_CHECK_GT(numClasses, 0u);
+ NN_RET_CHECK_GT(numBatches, 0u);
NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 1), kRoiDim);
NN_RET_CHECK_EQ(getSizeOfDimension(bboxDeltasShape, 0), numRois);
NN_RET_CHECK_EQ(getSizeOfDimension(bboxDeltasShape, 1), kRoiDim * numClasses);
NN_RET_CHECK_EQ(getSizeOfDimension(batchesShape, 0), numRois);
- NN_RET_CHECK_EQ(getSizeOfDimension(imageInfoShape, 1), 2);
+ NN_RET_CHECK_EQ(getSizeOfDimension(imageInfoShape, 1), 2u);
if (roiShape.type == OperandType::TENSOR_QUANT16_ASYMM) {
NN_RET_CHECK_EQ(roiShape.scale, 0.125f);
@@ -366,7 +366,7 @@
if (maxNumDetections < 0) {
maxNumDetections = selectLength;
}
- while (selectStart < selectEnd && numDetections < maxNumDetections) {
+ while (selectStart < selectEnd && numDetections < static_cast<uint32_t>(maxNumDetections)) {
// find max score and swap to the front
auto& maxScore = *std::max_element(selectStart, selectEnd,
[&scoresData](const uint32_t& lhs, const uint32_t& rhs) {
@@ -414,7 +414,7 @@
[&scoresData](const uint32_t& lhs, const uint32_t& rhs) {
return scoresData[lhs] > scoresData[rhs];
});
- if (maxNumDetections < 0 || select->size() <= maxNumDetections) {
+ if (maxNumDetections < 0 || select->size() <= static_cast<size_t>(maxNumDetections)) {
return;
}
select->resize(maxNumDetections);
@@ -429,7 +429,7 @@
if (maxNumDetections < 0) {
maxNumDetections = selectLength;
}
- while (selectStart < selectEnd && numDetections < maxNumDetections) {
+ while (selectStart < selectEnd && numDetections < static_cast<uint32_t>(maxNumDetections)) {
// find max score and swap to the front
auto& maxScore = *std::max_element(selectStart, selectEnd,
[&scoresData](const uint32_t& lhs, const uint32_t& rhs) {
@@ -479,7 +479,7 @@
[&scoresData](const uint32_t& lhs, const uint32_t& rhs) {
return scoresData[lhs] > scoresData[rhs];
});
- if (maxNumDetections < 0 || select->size() <= maxNumDetections) {
+ if (maxNumDetections < 0 || select->size() <= static_cast<size_t>(maxNumDetections)) {
return;
}
select->resize(maxNumDetections);
@@ -510,7 +510,8 @@
// We assume boxes of the same batch are grouped together.
std::vector<uint32_t> batch;
- for (uint32_t i = 0, ind = -1; i < numRois; i++) {
+ int32_t ind = -1;
+ for (uint32_t i = 0; i < numRois; i++) {
if (batchesData[i] == ind) {
(batchSplitIn->back())++;
} else {
@@ -780,7 +781,7 @@
NN_RET_CHECK(getSizeOfDimension(roiShape, 0) == numRois);
NN_RET_CHECK(getSizeOfDimension(roiShape, 1) == kRoiDim * numClasses);
NN_RET_CHECK(getSizeOfDimension(batchesShape, 0) == numRois);
- NN_RET_CHECK_GT(numClasses, 1);
+ NN_RET_CHECK_GT(numClasses, 1u);
if (scoreShape.type == OperandType::TENSOR_QUANT8_ASYMM ||
scoreShape.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1027,7 +1028,7 @@
// Find the top preNmsTopN scores.
std::vector<uint32_t> select(batchSize);
std::iota(select.begin(), select.end(), 0);
- if (preNmsTopN > 0 && preNmsTopN < select.size()) {
+ if (preNmsTopN > 0 && static_cast<size_t>(preNmsTopN) < select.size()) {
std::sort(select.begin(), select.end(),
[&scoresBase](const uint32_t lhs, const uint32_t rhs) {
return scoresBase[lhs] > scoresBase[rhs];
@@ -1300,10 +1301,10 @@
Shape outputRoiShape = context->getOutputShape(kOutputRoiTensor);
Shape outputBatchSplitShape = context->getOutputShape(kOutputBatchesTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(scoreShape), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bboxDeltasShape), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(anchorsShape), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(imageInfoDataShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(scoreShape), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bboxDeltasShape), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(anchorsShape), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(imageInfoDataShape), 2u);
const uint32_t kRoiDim = 4;
uint32_t numBatches = getSizeOfDimension(scoreShape, 0);
@@ -1316,7 +1317,7 @@
NN_RET_CHECK_EQ(getSizeOfDimension(bboxDeltasShape, useNchw ? 3 : 2), width);
NN_RET_CHECK_EQ(getSizeOfDimension(bboxDeltasShape, useNchw ? 1 : 3), numAnchors * kRoiDim);
NN_RET_CHECK_EQ(getSizeOfDimension(imageInfoDataShape, 0), numBatches);
- NN_RET_CHECK_EQ(getSizeOfDimension(imageInfoDataShape, 1), 2);
+ NN_RET_CHECK_EQ(getSizeOfDimension(imageInfoDataShape, 1), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(anchorsShape, 0), numAnchors);
NN_RET_CHECK_EQ(getSizeOfDimension(anchorsShape, 1), kRoiDim);
@@ -1632,9 +1633,9 @@
Shape outputClassShape = context->getOutputShape(kOutputClassTensor);
Shape outputDetectionShape = context->getOutputShape(kOutputDetectionTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(scoreShape), 3);
- NN_RET_CHECK_EQ(getNumberOfDimensions(deltasShape), 3);
- NN_RET_CHECK_EQ(getNumberOfDimensions(anchorsShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(scoreShape), 3u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(deltasShape), 3u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(anchorsShape), 2u);
const uint32_t kRoiDim = 4;
uint32_t numBatches = getSizeOfDimension(scoreShape, 0);
@@ -1668,13 +1669,13 @@
} else {
NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
}
- NN_RET_CHECK_GT(numClasses, 1);
- NN_RET_CHECK_GE(lengthBoxEncoding, 4);
- NN_RET_CHECK_GT(maxNumDetections, 0);
+ NN_RET_CHECK_GT(numClasses, 1u);
+ NN_RET_CHECK_GE(lengthBoxEncoding, 4u);
+ NN_RET_CHECK_GT(maxNumDetections, 0u);
if (context->getInputValue<bool>(kUseRegularNmsScalar)) {
NN_RET_CHECK_GT(context->getInputValue<int32_t>(kMaxNumDetectionPerClassScalar), 0);
} else {
- NN_RET_CHECK_GT(maxClassesPerDetection, 0);
+ NN_RET_CHECK_GT(maxClassesPerDetection, 0u);
numOutDetections *= maxClassesPerDetection;
}
diff --git a/common/operations/GroupedConv2D.cpp b/common/operations/GroupedConv2D.cpp
index 19218c0..eb8e0dc 100644
--- a/common/operations/GroupedConv2D.cpp
+++ b/common/operations/GroupedConv2D.cpp
@@ -64,7 +64,7 @@
for (uint32_t h = 0; h < outputHeight; h++) {
for (uint32_t w = 0; w < outputWidth; w++) {
const float* filterBase = filterData;
- for (uint32_t g = 0; g < numGroups; g++) {
+ for (int32_t g = 0; g < numGroups; g++) {
for (uint32_t d = 0; d < outputGroupDepth; d++) {
int32_t wInputOrigin =
static_cast<int32_t>(w) * stride_width - padding_left;
@@ -136,7 +136,7 @@
for (uint32_t h = 0; h < outputHeight; h++) {
for (uint32_t w = 0; w < outputWidth; w++) {
const T* filterBase = filterData;
- for (uint32_t g = 0; g < numGroups; g++) {
+ for (int32_t g = 0; g < numGroups; g++) {
for (uint32_t d = 0; d < outputGroupDepth; d++) {
int32_t wInputOrigin =
static_cast<int32_t>(w) * stride_width - padding_left;
@@ -218,7 +218,7 @@
auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
auto outputShift = std::vector<int32_t>(outputDepth, 0);
- for (int i = 0; i < outputDepth; ++i) {
+ for (uint32_t i = 0; i < outputDepth; ++i) {
Shape filterChannelShape = filterShape;
filterChannelShape.scale = filterScales[i];
Shape biasChannelShape = biasShape;
@@ -241,7 +241,7 @@
for (uint32_t h = 0; h < outputHeight; h++) {
for (uint32_t w = 0; w < outputWidth; w++) {
const int8_t* filterBase = filterData;
- for (uint32_t g = 0; g < numGroups; g++) {
+ for (int32_t g = 0; g < numGroups; g++) {
for (uint32_t d = 0; d < outputGroupDepth; d++) {
int32_t wInputOrigin =
static_cast<int32_t>(w) * stride_width - padding_left;
diff --git a/common/operations/HeatmapMaxKeypoint.cpp b/common/operations/HeatmapMaxKeypoint.cpp
index fdd78f6..386657c 100644
--- a/common/operations/HeatmapMaxKeypoint.cpp
+++ b/common/operations/HeatmapMaxKeypoint.cpp
@@ -131,8 +131,8 @@
// use mirroring for out of bound indexing
// need to ensure heatmapSize >= 2
- h = h < 0 ? 1 : (h >= heatmapSize ? heatmapSize - 2 : h);
- w = w < 0 ? 1 : (w >= heatmapSize ? heatmapSize - 2 : w);
+ h = h < 0 ? 1 : (static_cast<uint32_t>(h) >= heatmapSize ? heatmapSize - 2 : h);
+ w = w < 0 ? 1 : (static_cast<uint32_t>(w) >= heatmapSize ? heatmapSize - 2 : w);
uint32_t heatmapIndex = static_cast<uint32_t>(h) * heatmapSize * numKeypoints +
static_cast<uint32_t>(w) * numKeypoints + j;
@@ -262,17 +262,17 @@
bool layout = context->getInputValue<bool>(kLayoutScalar);
Shape heatmapShape = context->getInputShape(kHeatmapTensor);
Shape boxesShape = context->getInputShape(kBoxesTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(heatmapShape), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(boxesShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(heatmapShape), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(boxesShape), 2u);
uint32_t numBoxes = getSizeOfDimension(heatmapShape, 0);
uint32_t heatmapSize = getSizeOfDimension(heatmapShape, 2);
uint32_t numKeypoints = getSizeOfDimension(heatmapShape, layout ? 1 : 3);
uint32_t boxInfoLength = getSizeOfDimension(boxesShape, 1);
NN_RET_CHECK_EQ(getSizeOfDimension(heatmapShape, layout ? 3 : 1), heatmapSize);
- NN_RET_CHECK_GE(heatmapSize, 2);
+ NN_RET_CHECK_GE(heatmapSize, 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(boxesShape, 0), numBoxes);
- NN_RET_CHECK_EQ(boxInfoLength, 4);
+ NN_RET_CHECK_EQ(boxInfoLength, 4u);
if (heatmapShape.type == OperandType::TENSOR_QUANT8_ASYMM ||
heatmapShape.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/common/operations/InstanceNormalization.cpp b/common/operations/InstanceNormalization.cpp
index 3846a00..e221dab 100644
--- a/common/operations/InstanceNormalization.cpp
+++ b/common/operations/InstanceNormalization.cpp
@@ -126,7 +126,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
Shape input = context->getInputShape(kInputTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
return context->setOutputShape(kOutputTensor, input);
}
diff --git a/common/operations/L2Normalization.cpp b/common/operations/L2Normalization.cpp
index cfa04a8..93c32de 100644
--- a/common/operations/L2Normalization.cpp
+++ b/common/operations/L2Normalization.cpp
@@ -25,6 +25,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h>
#pragma clang diagnostic pop
@@ -229,7 +230,7 @@
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index c3f051d..6ca91be 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -52,7 +52,7 @@
NN_CHECK_EQ(NumOutputs(operation), 1);
const RunTimeOperandInfo* hash = GetInput(operation, operands, kHashTensor);
- NN_CHECK_EQ(NumDimensions(hash), 2);
+ NN_CHECK_EQ(NumDimensions(hash), 2u);
// Support up to 32 bits.
NN_CHECK(SizeOfDimension(hash, 1) <= 32);
@@ -71,7 +71,7 @@
case LSHProjectionType_DENSE: {
RunTimeOperandInfo* weight = GetInput(operation, operands, kWeightTensor);
NN_CHECK_EQ(NumInputsWithValues(operation, operands), 4);
- NN_CHECK_EQ(NumDimensions(weight), 1);
+ NN_CHECK_EQ(NumDimensions(weight), 1u);
NN_CHECK_EQ(SizeOfDimension(weight, 0), SizeOfDimension(input, 0));
outputShape->dimensions = {SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1)};
break;
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp
index 366154a..ec89291 100644
--- a/common/operations/LSTM.cpp
+++ b/common/operations/LSTM.cpp
@@ -159,30 +159,30 @@
NN_CHECK(params->proj_clip >= 0);
if (!IsNullInput(input_to_input_weights)) {
- NN_CHECK_EQ(NumDimensions(input_to_input_weights), 2);
+ NN_CHECK_EQ(NumDimensions(input_to_input_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(input_to_input_weights, 0), n_cell);
NN_CHECK_EQ(SizeOfDimension(input_to_input_weights, 1), n_input);
}
- NN_CHECK_EQ(NumDimensions(input_to_forget_weights), 2);
+ NN_CHECK_EQ(NumDimensions(input_to_forget_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(input_to_forget_weights, 0), n_cell);
NN_CHECK_EQ(SizeOfDimension(input_to_forget_weights, 1), n_input);
- NN_CHECK_EQ(NumDimensions(input_to_cell_weights), 2);
+ NN_CHECK_EQ(NumDimensions(input_to_cell_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(input_to_cell_weights, 0), n_cell);
NN_CHECK_EQ(SizeOfDimension(input_to_cell_weights, 1), n_input);
if (!IsNullInput(recurrent_to_input_weights)) {
- NN_CHECK_EQ(NumDimensions(recurrent_to_input_weights), 2);
+ NN_CHECK_EQ(NumDimensions(recurrent_to_input_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_input_weights, 0), n_cell);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_input_weights, 1), n_output);
}
- NN_CHECK_EQ(NumDimensions(recurrent_to_forget_weights), 2);
+ NN_CHECK_EQ(NumDimensions(recurrent_to_forget_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_forget_weights, 0), n_cell);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_forget_weights, 1), n_output);
- NN_CHECK_EQ(NumDimensions(recurrent_to_cell_weights), 2);
+ NN_CHECK_EQ(NumDimensions(recurrent_to_cell_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_cell_weights, 0), n_cell);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_cell_weights, 1), n_output);
@@ -194,17 +194,17 @@
NN_CHECK(cifg_weights_all_or_none);
if (!IsNullInput(cell_to_input_weights)) {
- NN_CHECK_EQ(NumDimensions(cell_to_input_weights), 1);
+ NN_CHECK_EQ(NumDimensions(cell_to_input_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(cell_to_input_weights, 0), n_cell);
}
if (!IsNullInput(cell_to_forget_weights)) {
- NN_CHECK_EQ(NumDimensions(cell_to_forget_weights), 1);
+ NN_CHECK_EQ(NumDimensions(cell_to_forget_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(cell_to_forget_weights, 0), n_cell);
}
if (!IsNullInput(cell_to_output_weights)) {
- NN_CHECK_EQ(NumDimensions(cell_to_output_weights), 1);
+ NN_CHECK_EQ(NumDimensions(cell_to_output_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(cell_to_output_weights, 0), n_cell);
}
@@ -231,27 +231,27 @@
if (params->use_cifg) {
NN_CHECK(IsNullInput(input_gate_bias));
} else {
- NN_CHECK_EQ(NumDimensions(input_gate_bias), 1);
+ NN_CHECK_EQ(NumDimensions(input_gate_bias), 1u);
NN_CHECK_EQ(SizeOfDimension(input_gate_bias, 0), n_cell);
}
- NN_CHECK_EQ(NumDimensions(forget_gate_bias), 1);
+ NN_CHECK_EQ(NumDimensions(forget_gate_bias), 1u);
NN_CHECK_EQ(SizeOfDimension(forget_gate_bias, 0), n_cell);
- NN_CHECK_EQ(NumDimensions(cell_bias), 1);
+ NN_CHECK_EQ(NumDimensions(cell_bias), 1u);
NN_CHECK_EQ(SizeOfDimension(cell_bias, 0), n_cell);
- NN_CHECK_EQ(NumDimensions(output_gate_bias), 1);
+ NN_CHECK_EQ(NumDimensions(output_gate_bias), 1u);
NN_CHECK_EQ(SizeOfDimension(output_gate_bias, 0), n_cell);
if (!IsNullInput(projection_weights)) {
- NN_CHECK_EQ(NumDimensions(projection_weights), 2);
+ NN_CHECK_EQ(NumDimensions(projection_weights), 2u);
NN_CHECK_EQ(SizeOfDimension(projection_weights, 0), n_output);
NN_CHECK_EQ(SizeOfDimension(projection_weights, 1), n_cell);
}
if (!IsNullInput(projection_bias)) {
- NN_CHECK_EQ(NumDimensions(projection_bias), 1);
+ NN_CHECK_EQ(NumDimensions(projection_bias), 1u);
NN_CHECK_EQ(SizeOfDimension(projection_bias, 0), n_output);
}
@@ -265,19 +265,19 @@
NN_CHECK(projecton_tensors_consistent == true);
if (!IsNullInput(input_layer_norm_weights)) {
- NN_CHECK_EQ(NumDimensions(input_layer_norm_weights), 1);
+ NN_CHECK_EQ(NumDimensions(input_layer_norm_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(input_layer_norm_weights, 0), n_cell);
}
if (!IsNullInput(forget_layer_norm_weights)) {
- NN_CHECK_EQ(NumDimensions(forget_layer_norm_weights), 1);
+ NN_CHECK_EQ(NumDimensions(forget_layer_norm_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(forget_layer_norm_weights, 0), n_cell);
}
if (!IsNullInput(cell_layer_norm_weights)) {
- NN_CHECK_EQ(NumDimensions(cell_layer_norm_weights), 1);
+ NN_CHECK_EQ(NumDimensions(cell_layer_norm_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(cell_layer_norm_weights, 0), n_cell);
}
if (!IsNullInput(output_layer_norm_weights)) {
- NN_CHECK_EQ(NumDimensions(output_layer_norm_weights), 1);
+ NN_CHECK_EQ(NumDimensions(output_layer_norm_weights), 1u);
NN_CHECK_EQ(SizeOfDimension(output_layer_norm_weights, 0), n_cell);
}
@@ -352,10 +352,10 @@
const uint32_t n_input = SizeOfDimension(input_, 1);
const uint32_t n_cell = SizeOfDimension(input_to_output_weights_, 0);
- NN_CHECK_EQ(NumDimensions(input_to_output_weights_), 2);
+ NN_CHECK_EQ(NumDimensions(input_to_output_weights_), 2u);
NN_CHECK_EQ(SizeOfDimension(input_to_output_weights_, 1), n_input);
- NN_CHECK_EQ(NumDimensions(recurrent_to_output_weights_), 2);
+ NN_CHECK_EQ(NumDimensions(recurrent_to_output_weights_), 2u);
NN_CHECK_EQ(SizeOfDimension(recurrent_to_output_weights_, 0), n_cell);
const uint32_t n_output = SizeOfDimension(recurrent_to_output_weights_, 1);
@@ -483,7 +483,7 @@
const int batchInputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchInputSize);
const int batchOutputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchOutputSize);
- for (int t = 0; t < maxTime; ++t) {
+ for (uint32_t t = 0; t < maxTime; ++t) {
LSTMStep(params, inputCurrentTimeStep, batchInputShape, input_to_input_weights_buffer,
input_to_forget_weights_buffer, input_to_cell_weights_buffer,
input_to_output_weights_buffer, input_to_output_weights_shape,
@@ -721,7 +721,7 @@
const int batchInputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchInputSize);
const int batchOutputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchOutputSize);
- for (int t = 0; t < maxTime; ++t) {
+ for (uint32_t t = 0; t < maxTime; ++t) {
LSTMStep(params, inputCurrentTimeStep, batchInputShape,
input_to_input_weights_float32.data(), input_to_forget_weights_float32.data(),
input_to_cell_weights_float32.data(), input_to_output_weights_float32.data(),
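
The two `maxTime` loop fixes above are the textbook -Wsign-compare case: a signed induction variable tested against an unsigned bound. A minimal standalone sketch of the pattern and the fix (names here are illustrative, not from this file):

    #include <cstdint>

    float sumFirst(const float* data, uint32_t maxTime) {
        float acc = 0.0f;
        // for (int t = 0; t < maxTime; ++t)   // -Wsign-compare: 't' is
        // converted to unsigned before the comparison with 'maxTime'.
        for (uint32_t t = 0; t < maxTime; ++t) {  // index type matches bound
            acc += data[t];
        }
        return acc;
    }

Matching the index type to the bound is preferable to casting inside the condition, since the loop body usually wants the unsigned type anyway.
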
diff --git a/common/operations/LocalResponseNormalization.cpp b/common/operations/LocalResponseNormalization.cpp
index 8d53654..77d8ce2 100644
--- a/common/operations/LocalResponseNormalization.cpp
+++ b/common/operations/LocalResponseNormalization.cpp
@@ -25,6 +25,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
#pragma clang diagnostic pop
@@ -64,7 +65,7 @@
const float* inputBase = inputData + outer * axisSize * innerSize;
float* outputBase = outputData + outer * axisSize * innerSize;
for (uint32_t inner = 0; inner < innerSize; ++inner, ++inputBase, ++outputBase) {
- for (int32_t i = 0; i < axisSize; i++) {
+ for (int32_t i = 0; i < static_cast<int32_t>(axisSize); i++) {
const int32_t dBegin = std::max(0, i - radius);
// Add 1 on dEnd to comply with optimized_ops in TFLite
const int32_t dEnd = std::min(static_cast<int32_t>(axisSize), i + radius + 1);
@@ -175,7 +176,7 @@
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
diff --git a/common/operations/Multinomial.cpp b/common/operations/Multinomial.cpp
index c737e9f..14757d9 100644
--- a/common/operations/Multinomial.cpp
+++ b/common/operations/Multinomial.cpp
@@ -108,8 +108,8 @@
}
void Multinomial::EvalFloat32(const float* inputData) {
- const int batch_size = SizeOfDimension(input_, 0);
- const int class_size = SizeOfDimension(input_, 1);
+ const uint32_t batch_size = SizeOfDimension(input_, 0);
+ const uint32_t class_size = SizeOfDimension(input_, 1);
tensorflow::GuardedPhiloxRandom random_generator;
int32_t* seeds = GetBuffer<int32_t>(random_seeds_);
@@ -143,7 +143,7 @@
}
auto* output_ptr_batch = GetBuffer<int32_t>(output_) + b * sample_count_;
- for (uint64_t j = 0; j < sample_count_; ++j) {
+ for (uint64_t j = 0; j < static_cast<uint64_t>(sample_count_); ++j) {
const double target = simple_philox.RandDouble() * total;
auto found_iter = std::upper_bound(cdf.begin(), cdf.end(), target);
output_ptr_batch[j] = std::distance(cdf.begin(), found_iter);
diff --git a/common/operations/PRelu.cpp b/common/operations/PRelu.cpp
index a320fef..b6980c0 100644
--- a/common/operations/PRelu.cpp
+++ b/common/operations/PRelu.cpp
@@ -27,6 +27,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#pragma clang diagnostic pop
#endif // NN_INCLUDE_CPU_IMPLEMENTATION
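
The recurring pragma hunks scope the suppression to the vendored TFLite headers, which still mix signed and unsigned comparisons internally, while first-party code keeps the full warning set. The idiom in isolation (the include path is a placeholder):

    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wsign-compare"
    #include "third_party/vendored_ops.h"  // placeholder for the TFLite header
    #pragma clang diagnostic pop

Because the diagnostic state is pushed and popped, the suppression cannot leak into headers included later in the same translation unit.
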
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp
index 9a455c5..1d1c347 100644
--- a/common/operations/Pooling.cpp
+++ b/common/operations/Pooling.cpp
@@ -25,6 +25,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h>
#pragma clang diagnostic pop
@@ -363,7 +364,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
Shape input = context->getInputShape(kInputTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
PoolingParam param;
NN_RET_CHECK(param.initialize(context));
@@ -373,9 +374,9 @@
uint32_t height = getSizeOfDimension(input, param.useNchw ? 2 : 1);
uint32_t width = getSizeOfDimension(input, param.useNchw ? 3 : 2);
uint32_t channels = getSizeOfDimension(input, param.useNchw ? 1 : 3);
- NN_RET_CHECK_GT(height, 0);
- NN_RET_CHECK_GT(width, 0);
- NN_RET_CHECK_GT(channels, 0);
+ NN_RET_CHECK_GT(height, 0u);
+ NN_RET_CHECK_GT(width, 0u);
+ NN_RET_CHECK_GT(channels, 0u);
uint32_t outWidth = computeOutSize(width, param.filter_width, param.stride_width,
param.padding_left, param.padding_right);
diff --git a/common/operations/QLSTM.cpp b/common/operations/QLSTM.cpp
index d5e555b..7e897bb 100644
--- a/common/operations/QLSTM.cpp
+++ b/common/operations/QLSTM.cpp
@@ -178,50 +178,50 @@
const Shape inputShape = context->getInputShape(kInputTensor);
const uint32_t inputRank = getNumberOfDimensions(inputShape);
- NN_RET_CHECK_EQ(inputRank, 2) << "Invalid input tensor rank: " << inputRank;
+ NN_RET_CHECK_EQ(inputRank, 2u) << "Invalid input tensor rank: " << inputRank;
const uint32_t batchSize = getSizeOfDimension(inputShape, 0);
const uint32_t inputSize = getSizeOfDimension(inputShape, 1);
const Shape inputToOutputShape = context->getInputShape(kInputToOutputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToOutputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToOutputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToOutputShape, 1), inputSize);
const uint32_t numUnits = getSizeOfDimension(inputToOutputShape, 0);
const Shape recurrentToOutputShape = context->getInputShape(kRecurrentToOutputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToOutputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToOutputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToOutputShape, 0), numUnits);
const uint32_t outputSize = getSizeOfDimension(recurrentToOutputShape, 1);
if (hasTensor(context, kInputToInputWeightsTensor)) {
const Shape inputToInputShape = context->getInputShape(kInputToInputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToInputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToInputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToInputShape, 0), numUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToInputShape, 1), inputSize);
}
const Shape inputToForgetShape = context->getInputShape(kInputToForgetWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToForgetShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToForgetShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToForgetShape, 0), numUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToForgetShape, 1), inputSize);
const Shape inputToCellShape = context->getInputShape(kInputToCellWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToCellShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToCellShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToCellShape, 0), numUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToCellShape, 1), inputSize);
if (hasTensor(context, kRecurrentToInputWeightsTensor)) {
const Shape recurrentToInputShape = context->getInputShape(kRecurrentToInputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToInputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToInputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToInputShape, 0), numUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToInputShape, 1), outputSize);
}
const Shape recurrentToForgetShape = context->getInputShape(kRecurrentToForgetWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToForgetShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToForgetShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToForgetShape, 0), numUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToForgetShape, 1), outputSize);
const Shape recurrentToCellShape = context->getInputShape(kRecurrentToCellWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToCellShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToCellShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToCellShape, 0), numUnits);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToCellShape, 1), outputSize);
@@ -235,19 +235,19 @@
if (hasTensor(context, kCellToInputWeightsTensor)) {
const Shape cellToInputShape = context->getInputShape(kCellToInputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellToInputShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellToInputShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellToInputShape, 0), numUnits);
}
if (hasTensor(context, kCellToForgetWeightsTensor)) {
const Shape cellToForgetShape = context->getInputShape(kCellToForgetWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellToForgetShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellToForgetShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellToForgetShape, 0), numUnits);
}
if (hasTensor(context, kCellToOutputWeightsTensor)) {
const Shape cellToOutputShape = context->getInputShape(kCellToOutputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellToOutputShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellToOutputShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellToOutputShape, 0), numUnits);
}
@@ -265,7 +265,7 @@
if (!cifgUsed) {
NN_RET_CHECK(hasTensor(context, kInputGateBiasTensor));
const Shape inputGateBiasShape = context->getInputShape(kInputGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputGateBiasShape, 0), numUnits);
} else {
NN_RET_CHECK(!hasTensor(context, kInputGateBiasTensor))
@@ -273,58 +273,58 @@
}
const Shape forgetGateBiasShape = context->getInputShape(kForgetGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(forgetGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(forgetGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(forgetGateBiasShape, 0), numUnits);
const Shape cellGateBiasShape = context->getInputShape(kCellGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellGateBiasShape, 0), numUnits);
const Shape outputGateBiasShape = context->getInputShape(kOutputGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(outputGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(outputGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(outputGateBiasShape, 0), numUnits);
if (hasTensor(context, kProjectionWeightsTensor)) {
const Shape projectionShape = context->getInputShape(kProjectionWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(projectionShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(projectionShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(projectionShape, 0), outputSize);
NN_RET_CHECK_EQ(getSizeOfDimension(projectionShape, 1), numUnits);
}
if (hasTensor(context, kProjectionBiasTensor)) {
const Shape projectionBiasShape = context->getInputShape(kProjectionBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(projectionBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(projectionBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(projectionBiasShape, 0), outputSize);
}
const Shape outputStateShape = context->getInputShape(kPrevOutputTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(outputStateShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(outputStateShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(outputStateShape, 0), batchSize);
NN_RET_CHECK_EQ(getSizeOfDimension(outputStateShape, 1), outputSize);
const Shape cellStateShape = context->getInputShape(kPrevCellStateTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellStateShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellStateShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellStateShape, 0), batchSize);
NN_RET_CHECK_EQ(getSizeOfDimension(cellStateShape, 1), numUnits);
if (hasTensor(context, kInputLayerNormTensor)) {
const Shape inputLayerNormShape = context->getInputShape(kInputLayerNormTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputLayerNormShape, 0), numUnits);
}
if (hasTensor(context, kForgetLayerNormTensor)) {
const Shape forgetLayerNormShape = context->getInputShape(kForgetLayerNormTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(forgetLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(forgetLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(forgetLayerNormShape, 0), numUnits);
}
if (hasTensor(context, kCellLayerNormTensor)) {
const Shape cellLayerNormShape = context->getInputShape(kCellLayerNormTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellLayerNormShape, 0), numUnits);
}
if (hasTensor(context, kOutputLayerNormTensor)) {
const Shape outputLayerNormShape = context->getInputShape(kOutputLayerNormTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(outputLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(outputLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(outputLayerNormShape, 0), numUnits);
}
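
Most hunks in this file only add a `u` suffix. The suffix matters because the check macros compare their two arguments with the plain operator on deduced types, so `NN_RET_CHECK_EQ(rank, 2)` pits a `uint32_t` against an `int`. A simplified stand-in for the macro, assuming the real one differs only in its logging and return plumbing:

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for NN_RET_CHECK_EQ.
    #define CHECK_EQ_SKETCH(a, b)              \
        do {                                   \
            if (!((a) == (b))) {               \
                std::cerr << "check failed\n"; \
                return false;                  \
            }                                  \
        } while (0)

    bool validateRank(uint32_t rank) {
        // CHECK_EQ_SKETCH(rank, 2);   // int vs uint32_t: -Wsign-compare
        CHECK_EQ_SKETCH(rank, 2u);     // both operands unsigned: clean
        return true;
    }
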
diff --git a/common/operations/QuantizedLSTM.cpp b/common/operations/QuantizedLSTM.cpp
index cdc930b..42df5ba 100644
--- a/common/operations/QuantizedLSTM.cpp
+++ b/common/operations/QuantizedLSTM.cpp
@@ -20,6 +20,7 @@
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <public/gemmlowp.h>
#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
#pragma clang diagnostic pop
@@ -248,14 +249,14 @@
bool QuantizedLSTMCell::prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* cellStateOutShape, Shape* outputShape) {
auto input = GetInput(operation, operands, kInputTensor);
- NN_RET_CHECK_EQ(NumDimensions(input), 2);
+ NN_RET_CHECK_EQ(NumDimensions(input), 2u);
NN_RET_CHECK_EQ(input->scale, 1. / 128.0);
NN_RET_CHECK_EQ(input->zeroPoint, 128);
const uint32_t numBatches = SizeOfDimension(input, 0);
const uint32_t inputSize = SizeOfDimension(input, 1);
auto prevOutput = GetInput(operation, operands, kPrevOutputTensor);
- NN_RET_CHECK_EQ(NumDimensions(prevOutput), 2);
+ NN_RET_CHECK_EQ(NumDimensions(prevOutput), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(prevOutput, 0), numBatches);
NN_RET_CHECK_EQ(prevOutput->scale, 1. / 128.0);
NN_RET_CHECK_EQ(prevOutput->zeroPoint, 128);
@@ -267,7 +268,7 @@
const float weightsZeroPoint = inputToInputWeights->zeroPoint;
auto checkWeightsShape = [&](const RunTimeOperandInfo* weights, uint32_t columns) -> bool {
- NN_RET_CHECK_EQ(NumDimensions(weights), 2);
+ NN_RET_CHECK_EQ(NumDimensions(weights), 2u);
NN_RET_CHECK_EQ(SizeOfDimension(weights, 0), outputSize);
NN_RET_CHECK_EQ(SizeOfDimension(weights, 1), columns);
NN_RET_CHECK_EQ(weights->scale, weightsScale);
@@ -299,7 +300,7 @@
NN_RET_CHECK_EQ(biasZeroPoint, 0);
auto checkBiasShape = [&](const RunTimeOperandInfo* bias) -> bool {
- NN_RET_CHECK_EQ(NumDimensions(bias), 1);
+ NN_RET_CHECK_EQ(NumDimensions(bias), 1u);
NN_RET_CHECK_EQ(SizeOfDimension(bias, 0), outputSize);
NN_RET_CHECK_EQ(bias->scale, biasScale);
NN_RET_CHECK_EQ(bias->zeroPoint, biasZeroPoint);
@@ -315,7 +316,7 @@
NN_RET_CHECK(checkBiasShape(outputGateBias));
auto prevCellState = GetInput(operation, operands, kPrevCellStateTensor);
- NN_CHECK_EQ(NumDimensions(prevCellState), 2);
+ NN_CHECK_EQ(NumDimensions(prevCellState), 2u);
NN_CHECK_EQ(SizeOfDimension(prevCellState, 0), numBatches);
NN_CHECK_EQ(SizeOfDimension(prevCellState, 1), outputSize);
NN_CHECK_EQ(prevCellState->zeroPoint, 0);
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp
index 5f7e1a0..5d6a11a 100644
--- a/common/operations/Reduce.cpp
+++ b/common/operations/Reduce.cpp
@@ -27,6 +27,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
#pragma clang diagnostic pop
#endif // NN_INCLUDE_CPU_IMPLEMENTATION
@@ -85,7 +86,7 @@
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
return Version::ANDROID_Q;
}
@@ -108,7 +109,7 @@
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
return minVersion;
}
@@ -124,7 +125,7 @@
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
return Version::ANDROID_Q;
}
@@ -133,7 +134,7 @@
bool prepare(IOperationExecutionContext* context) {
Shape inputShape = context->getInputShape(kInputTensor);
const uint32_t inputRank = getNumberOfDimensions(inputShape);
- NN_RET_CHECK_LE(inputRank, 4);
+ NN_RET_CHECK_LE(inputRank, 4u);
std::vector<bool> shouldReduce(inputRank);
const int32_t* axes = context->getInputBuffer<int32_t>(kInputAxes);
diff --git a/common/operations/Reshape.cpp b/common/operations/Reshape.cpp
index e41998d..681445c 100644
--- a/common/operations/Reshape.cpp
+++ b/common/operations/Reshape.cpp
@@ -20,6 +20,7 @@
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
#pragma clang diagnostic pop
diff --git a/common/operations/ResizeImageOps.cpp b/common/operations/ResizeImageOps.cpp
index 02b6dd5..db272a8 100644
--- a/common/operations/ResizeImageOps.cpp
+++ b/common/operations/ResizeImageOps.cpp
@@ -27,6 +27,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
#pragma clang diagnostic pop
@@ -232,7 +233,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(OperationType opType, IOperationExecutionContext* context) {
Shape input = context->getInputShape(kInputTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
[[maybe_unused]] const auto numInputs = context->getNumInputs();
const bool useNchw = getOptionalScalar(context, kLayoutScalar);
const bool alignCorners = getOptionalScalar(context, kAlignCornersScalar);
@@ -245,9 +246,9 @@
uint32_t inHeight = getSizeOfDimension(input, useNchw ? 2 : 1);
uint32_t inWidth = getSizeOfDimension(input, useNchw ? 3 : 2);
uint32_t channels = getSizeOfDimension(input, useNchw ? 1 : 3);
- NN_RET_CHECK_GT(inHeight, 0);
- NN_RET_CHECK_GT(inWidth, 0);
- NN_RET_CHECK_GT(channels, 0);
+ NN_RET_CHECK_GT(inHeight, 0u);
+ NN_RET_CHECK_GT(inWidth, 0u);
+ NN_RET_CHECK_GT(channels, 0u);
int32_t height, width;
auto scalarType = context->getInputType(kOutputHeightParamScalar);
diff --git a/common/operations/RoiAlign.cpp b/common/operations/RoiAlign.cpp
index 7d5e412..b294386 100644
--- a/common/operations/RoiAlign.cpp
+++ b/common/operations/RoiAlign.cpp
@@ -88,7 +88,7 @@
// 1. invalid batch id
// 2. Region out of bound: x1|x2|y1|y2 < 0 || x1|x2 > inWidth || y1|y2 > inHeight
// 3. Invalid region: x2 < x1 || y2 < y1
- NN_RET_CHECK_GE(batchId, 0);
+ NN_RET_CHECK_GE(batchId, 0u);
NN_RET_CHECK_LT(batchId, numBatches);
NN_RET_CHECK(roiInfo[0] >= 0);
NN_RET_CHECK(roiInfo[1] >= 0);
@@ -220,7 +220,7 @@
// 1. invalid batch id
// 2. Region out of bound: x1|x2|y1|y2 < 0 || x1|x2 > inWidth || y1|y2 > inHeight
// 3. Invalid region: x2 < x1 || y2 < y1
- NN_RET_CHECK_GE(batchId, 0);
+ NN_RET_CHECK_GE(batchId, 0u);
NN_RET_CHECK_LT(batchId, numBatches);
NN_RET_CHECK(wRoiStart <= inWidth);
NN_RET_CHECK(hRoiStart <= inHeight);
@@ -392,8 +392,8 @@
Shape input = context->getInputShape(kInputTensor);
Shape roiShape = context->getInputShape(kRoiTensor);
Shape batchSplitShape = context->getInputShape(kBatchSplitTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2u);
uint32_t numBatches = getSizeOfDimension(input, 0);
uint32_t inHeight = getSizeOfDimension(input, useNchw ? 2 : 1);
@@ -401,11 +401,11 @@
uint32_t inDepth = getSizeOfDimension(input, useNchw ? 1 : 3);
uint32_t numRois = getSizeOfDimension(roiShape, 0);
// Every dimension must be positive except for numRois.
- NN_RET_CHECK_GT(numBatches, 0);
- NN_RET_CHECK_GT(inHeight, 0);
- NN_RET_CHECK_GT(inWidth, 0);
- NN_RET_CHECK_GT(inDepth, 0);
- NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 1), 4);
+ NN_RET_CHECK_GT(numBatches, 0u);
+ NN_RET_CHECK_GT(inHeight, 0u);
+ NN_RET_CHECK_GT(inWidth, 0u);
+ NN_RET_CHECK_GT(inDepth, 0u);
+ NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 1), 4u);
NN_RET_CHECK_EQ(getSizeOfDimension(batchSplitShape, 0), numRois);
int32_t outputHeight = context->getInputValue<int32_t>(kOutputHeightScalar);
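
With an unsigned `batchId`, `NN_RET_CHECK_GE(batchId, 0u)` can never fail; it survives as documentation of the "invalid batch id" error case listed in the comment, and the `0u` keeps both operands of the deduced comparison unsigned. A sketch of the mechanism, assuming the macro bottoms out in a function template like this:

    #include <cstdint>

    template <typename A, typename B>
    bool checkGE(A a, B b) {  // simplified stand-in for NN_RET_CHECK_GE
        return a >= b;        // warns when A and B differ in signedness
    }

    bool validBatch(uint32_t batchId, uint32_t numBatches) {
        // checkGE(batchId, 0) instantiates checkGE<uint32_t, int>,
        // which trips -Wsign-compare; with 0u both sides are unsigned.
        return checkGE(batchId, 0u) && batchId < numBatches;
    }
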
diff --git a/common/operations/RoiPooling.cpp b/common/operations/RoiPooling.cpp
index 0e1986d..a3ec5b8 100644
--- a/common/operations/RoiPooling.cpp
+++ b/common/operations/RoiPooling.cpp
@@ -80,7 +80,7 @@
// 1. invalid batch id
// 2. Region out of bound: x1|x2|y1|y2 < 0 || x1|x2 > inWidth || y1|y2 > inHeight
// 3. Invalid region: x2 < x1 || y2 < y1
- NN_RET_CHECK_GE(batchId, 0);
+ NN_RET_CHECK_GE(batchId, 0u);
NN_RET_CHECK_LT(batchId, numBatches);
NN_RET_CHECK(roiInfo[0] >= 0);
NN_RET_CHECK(roiInfo[1] >= 0);
@@ -232,15 +232,15 @@
Shape input = context->getInputShape(kInputTensor);
Shape roiShape = context->getInputShape(kRoiTensor);
Shape batchSplitShape = context->getInputShape(kBatchSplitTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(roiShape), 2u);
[[maybe_unused]] uint32_t numBatches = getSizeOfDimension(input, 0);
[[maybe_unused]] uint32_t inHeight = getSizeOfDimension(input, useNchw ? 2 : 1);
[[maybe_unused]] uint32_t inWidth = getSizeOfDimension(input, useNchw ? 3 : 2);
uint32_t inDepth = getSizeOfDimension(input, useNchw ? 1 : 3);
uint32_t numRois = getSizeOfDimension(roiShape, 0);
- NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 1), 4);
+ NN_RET_CHECK_EQ(getSizeOfDimension(roiShape, 1), 4u);
NN_RET_CHECK_EQ(getSizeOfDimension(batchSplitShape, 0), numRois);
auto outputHeight = context->getInputValue<int32_t>(kOutputHeightScalar);
diff --git a/common/operations/SVDF.cpp b/common/operations/SVDF.cpp
index 630dbb4..36724d4 100644
--- a/common/operations/SVDF.cpp
+++ b/common/operations/SVDF.cpp
@@ -80,7 +80,7 @@
const int rank = getScalarData<int>(*GetInput(operation, operands, kRankParam));
const uint32_t batch_size = SizeOfDimension(input, 0);
const uint32_t num_filters = SizeOfDimension(weights_feature, 0);
- NN_CHECK_EQ(num_filters % rank, 0);
+ NN_CHECK_EQ(num_filters % rank, 0u);
const uint32_t num_units = num_filters / rank;
const uint32_t memory_size = SizeOfDimension(weights_time, 1);
NN_CHECK_EQ(SizeOfDimension(input, 1), SizeOfDimension(weights_feature, 1));
diff --git a/common/operations/Select.cpp b/common/operations/Select.cpp
index f037b48..c8f88a1 100644
--- a/common/operations/Select.cpp
+++ b/common/operations/Select.cpp
@@ -88,7 +88,7 @@
LOG(ERROR) << "Condition and input tensor dimensions are not equal";
return false;
}
- for (int i = 0; i < inputCondition.dimensions.size(); ++i) {
+ for (size_t i = 0; i < inputCondition.dimensions.size(); ++i) {
if (inputCondition.dimensions[i] != input1.dimensions[i]) {
LOG(ERROR) << "Condition and input tensor dimensions are not equal";
return false;
diff --git a/common/operations/SimpleMath.cpp b/common/operations/SimpleMath.cpp
index ecd3631..afaf723 100644
--- a/common/operations/SimpleMath.cpp
+++ b/common/operations/SimpleMath.cpp
@@ -20,6 +20,7 @@
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
#pragma clang diagnostic pop
diff --git a/common/operations/Slice.cpp b/common/operations/Slice.cpp
index 77ff9f0..c4f0854 100644
--- a/common/operations/Slice.cpp
+++ b/common/operations/Slice.cpp
@@ -44,7 +44,7 @@
template <typename T>
void addVectors(const std::vector<T>& a, const std::vector<T>& b, std::vector<T>* res) {
- for (int i = 0; i < res->size(); ++i) {
+ for (size_t i = 0; i < res->size(); ++i) {
res->at(i) = a[i] + b[i];
}
}
@@ -60,7 +60,7 @@
std::vector<uint32_t> beginIndex(getSizeOfDimension(beginShape, 0));
std::vector<uint32_t> inputIndex(getNumberOfDimensions(inputShape));
- for (int i = 0; i < beginIndex.size(); ++i) {
+ for (size_t i = 0; i < beginIndex.size(); ++i) {
beginIndex[i] = static_cast<uint32_t>(beginData[i]);
}
@@ -109,15 +109,15 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
const Shape& inputShape = context->getInputShape(kInputTensor);
- const int32_t n_dims = getNumberOfDimensions(inputShape);
+ const uint32_t n_dims = getNumberOfDimensions(inputShape);
NN_RET_CHECK(n_dims > 0);
const Shape& beginShape = context->getInputShape(kBeginTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(beginShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(beginShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(beginShape, 0), n_dims);
const Shape& sizeShape = context->getInputShape(kSizeTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(sizeShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(sizeShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(sizeShape, 0), n_dims);
const int32_t* beginData = context->getInputBuffer<int32_t>(kBeginTensor);
@@ -125,15 +125,16 @@
Shape outputShape = context->getOutputShape(kOutputTensor);
outputShape.dimensions.resize(n_dims);
- for (int i = 0; i < n_dims; ++i) {
+ for (uint32_t i = 0; i < n_dims; ++i) {
const int32_t sliceBegin = beginData[i];
int32_t sliceSize = sizeData[i];
if (sliceSize == -1) {
sliceSize = getSizeOfDimension(inputShape, i) - sliceBegin;
}
- NN_RET_CHECK_LE(beginData[i], getSizeOfDimension(inputShape, i));
+ NN_RET_CHECK_LE(static_cast<uint32_t>(beginData[i]), getSizeOfDimension(inputShape, i));
NN_RET_CHECK_GE(sliceSize, 0);
- NN_RET_CHECK_LE(sliceBegin + sliceSize, getSizeOfDimension(inputShape, i));
+ NN_RET_CHECK_LE(static_cast<uint32_t>(sliceBegin + sliceSize),
+ getSizeOfDimension(inputShape, i));
outputShape.dimensions[i] = sliceSize;
}
return context->setOutputShape(kOutputTensor, outputShape);
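
The casts in the Slice bounds checks stay safe for negative inputs: converting a negative `int32_t` to `uint32_t` wraps to a huge value, which then fails the `<=` test anyway, so rejection behavior is preserved. A self-contained sketch of that property (function name hypothetical):

    #include <cstdint>

    bool beginInBounds(int32_t begin, uint32_t dimSize) {
        // A negative 'begin' wraps to a value far above any real
        // dimension size, so the check still rejects it.
        return static_cast<uint32_t>(begin) <= dimSize;
    }

    // beginInBounds(-1, 10) -> 4294967295 <= 10 -> false
    // beginInBounds(3, 10)  -> true
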
diff --git a/common/operations/Softmax.cpp b/common/operations/Softmax.cpp
index b2e1efe..838574e 100644
--- a/common/operations/Softmax.cpp
+++ b/common/operations/Softmax.cpp
@@ -28,6 +28,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
#pragma clang diagnostic pop
@@ -256,7 +257,7 @@
}
const auto inputRank = getNumberOfDimensions(context->getInputShape(kInputTensor));
if (inputRank != 0) {
- NN_RET_CHECK_LE(inputRank, 4);
+ NN_RET_CHECK_LE(inputRank, 4u);
}
if (context->getNumInputs() == kNumInputs) {
minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q);
@@ -277,7 +278,7 @@
float beta = (input.type == OperandType::TENSOR_FLOAT16)
? context->getInputValue<_Float16>(kBetaScalar)
: context->getInputValue<float>(kBetaScalar);
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
NN_RET_CHECK_GT(beta, 0.0f);
Shape output = context->getOutputShape(kOutputTensor);
output.dimensions = input.dimensions;
diff --git a/common/operations/Split.cpp b/common/operations/Split.cpp
index 441b5a2..42e0912 100644
--- a/common/operations/Split.cpp
+++ b/common/operations/Split.cpp
@@ -42,7 +42,7 @@
const Scalar* inputPtr = inputData;
for (int k = 0; k < outerSize; k++) {
- for (int i = 0; i < outputDataPtrs->size(); ++i) {
+ for (size_t i = 0; i < outputDataPtrs->size(); ++i) {
const int copySize = outputShapes[i].dimensions[axis] * baseInnerSize;
memcpy(outputDataPtrs->at(i) + k * copySize, inputPtr, copySize * sizeof(Scalar));
inputPtr += copySize;
diff --git a/common/operations/Squeeze.cpp b/common/operations/Squeeze.cpp
index 21ec132..7a4f192 100644
--- a/common/operations/Squeeze.cpp
+++ b/common/operations/Squeeze.cpp
@@ -61,7 +61,7 @@
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
return minSupportedVersion;
}
@@ -77,7 +77,7 @@
const Shape squeezeDimsShape = context->getInputShape(kSqueezeDims);
int32_t numInputDims = static_cast<int32_t>(getNumberOfDimensions(inputShape));
- NN_RET_CHECK_LE(getNumberOfDimensions(inputShape), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(inputShape), 4u);
// squeezeDims need to be provided as a 1-D int32 tensor.
NN_OPS_CHECK(squeezeDimsShape.type == OperandType::TENSOR_INT32);
diff --git a/common/operations/StridedSlice.cpp b/common/operations/StridedSlice.cpp
index 07e3506..29f28c0 100644
--- a/common/operations/StridedSlice.cpp
+++ b/common/operations/StridedSlice.cpp
@@ -27,6 +27,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
#pragma clang diagnostic pop
@@ -135,7 +136,7 @@
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
return minSupportedVersion;
}
diff --git a/common/operations/Tile.cpp b/common/operations/Tile.cpp
index e08ab87..5135c8a 100644
--- a/common/operations/Tile.cpp
+++ b/common/operations/Tile.cpp
@@ -43,7 +43,7 @@
std::pair<int, int> TileOneDimension(const Shape& input_shape, const T* in_data,
const M* multipliers, T* out_data, int dimension) {
const int dimension_size = input_shape.dimensions[dimension];
- if (dimension == input_shape.dimensions.size() - 1) {
+ if (static_cast<size_t>(dimension) == input_shape.dimensions.size() - 1) {
CopyMultipleTimes(in_data, dimension_size, multipliers[dimension], out_data);
return std::make_pair(dimension_size,
dimension_size * static_cast<int>(multipliers[dimension]));
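
Casting the signed `dimension` is the right direction here, but `dimensions.size() - 1` is `size_t` arithmetic and would wrap to SIZE_MAX on an empty vector, so the pattern assumes at least one dimension. A sketch under that assumption (names illustrative):

    #include <cstddef>
    #include <vector>

    bool isLastDimension(int dimension, const std::vector<unsigned>& dims) {
        // Assumes !dims.empty(); otherwise dims.size() - 1 wraps to SIZE_MAX.
        return static_cast<size_t>(dimension) == dims.size() - 1;
    }
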
diff --git a/common/operations/TopK_V2.cpp b/common/operations/TopK_V2.cpp
index d19a309..448f19c 100644
--- a/common/operations/TopK_V2.cpp
+++ b/common/operations/TopK_V2.cpp
@@ -96,7 +96,7 @@
const Shape inputShape = context->getInputShape(kInputTensor);
const int32_t k = context->getInputValue<int32_t>(kTopKScalar);
NN_RET_CHECK_GT(k, 0);
- NN_RET_CHECK_LE(k, inputShape.dimensions.back());
+ NN_RET_CHECK_LE(static_cast<uint32_t>(k), inputShape.dimensions.back());
// Copy input shape to ensure that quantization parameters for the output
// values are the same as for the input tensor.
diff --git a/common/operations/Transpose.cpp b/common/operations/Transpose.cpp
index 9e26edd..7592553 100644
--- a/common/operations/Transpose.cpp
+++ b/common/operations/Transpose.cpp
@@ -24,6 +24,7 @@
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
+#pragma clang diagnostic ignored "-Wsign-compare"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
#pragma clang diagnostic pop
@@ -94,7 +95,7 @@
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
- NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
+ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4u);
}
NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::TENSOR_INT32}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
@@ -116,18 +117,18 @@
// permData can be NO_VALUE representing a regular 2D matrix transpose
if (context->isOmittedInput(kPermTensor)) {
- NN_RET_CHECK_EQ(numInputDims, 2);
+ NN_RET_CHECK_EQ(numInputDims, 2u);
output.dimensions = {getSizeOfDimension(input, 1), getSizeOfDimension(input, 0)};
} else {
const Shape& permShape = context->getInputShape(kPermTensor);
const int32_t* permData = context->getInputBuffer<int32_t>(kPermTensor);
// Transpose op only supports 1D-4D input arrays.
- NN_RET_CHECK_LE(numInputDims, 4);
+ NN_RET_CHECK_LE(numInputDims, 4u);
// perm need to be provided as a 1-D int32 tensor.
NN_RET_CHECK(permShape.type == OperandType::TENSOR_INT32);
- NN_RET_CHECK_EQ(getNumberOfDimensions(permShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(permShape), 1u);
NN_RET_CHECK_EQ(numInputDims, getSizeOfDimension(permShape, 0));
std::vector<uint32_t> outDims(numInputDims);
diff --git a/common/operations/TransposeConv2D.cpp b/common/operations/TransposeConv2D.cpp
index 111bbf2..55ed891 100644
--- a/common/operations/TransposeConv2D.cpp
+++ b/common/operations/TransposeConv2D.cpp
@@ -78,8 +78,8 @@
Shape filterShape = context->getInputShape(kFilterTensor);
int32_t filterWidth = getSizeOfDimension(filterShape, 2);
int32_t filterHeight = getSizeOfDimension(filterShape, 1);
- NN_RET_CHECK_EQ(getNumberOfDimensions(context->getInputShape(3)), 1);
- NN_RET_CHECK_EQ(getSizeOfDimension(context->getInputShape(3), 0), 4);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(context->getInputShape(3)), 1u);
+ NN_RET_CHECK_EQ(getSizeOfDimension(context->getInputShape(3), 0), 4u);
const int32_t* outputShapeData = context->getInputBuffer<int32_t>(3);
int32_t outputWidth = useNchw ? outputShapeData[3] : outputShapeData[2];
int32_t outputHeight = useNchw ? outputShapeData[2] : outputShapeData[1];
@@ -347,7 +347,7 @@
std::vector<double> realMultiplier(outputDepth, 0.0);
std::vector<int32_t> outputMultiplier(outputDepth, 0);
std::vector<int32_t> outputShift(outputDepth, 0);
- for (int i = 0; i < outputDepth; ++i) {
+ for (uint32_t i = 0; i < outputDepth; ++i) {
Shape filterChannelShape = filterShape;
filterChannelShape.scale = filterScales[i];
Shape biasChannelShape = biasShape;
@@ -462,7 +462,7 @@
NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
context->getInputExtraParams(kFilterTensor))
.channelDim,
- 0)
+ 0u)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
}
@@ -507,9 +507,9 @@
} else {
NN_RET_CHECK(input.type == bias.type);
}
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1u);
TransposeConv2dParam param;
NN_RET_CHECK(param.initialize(context));
@@ -524,19 +524,19 @@
// Only batches can be zero.
NN_RET_CHECK_EQ(channels_in, getSizeOfDimension(filter, 3));
NN_RET_CHECK_EQ(channels_out, getSizeOfDimension(bias, 0));
- NN_RET_CHECK_GT(height, 0);
- NN_RET_CHECK_GT(width, 0);
- NN_RET_CHECK_GT(channels_in, 0);
- NN_RET_CHECK_GT(channels_out, 0);
- NN_RET_CHECK_GT(filterWidth, 0);
- NN_RET_CHECK_GT(filterHeight, 0);
+ NN_RET_CHECK_GT(height, 0u);
+ NN_RET_CHECK_GT(width, 0u);
+ NN_RET_CHECK_GT(channels_in, 0u);
+ NN_RET_CHECK_GT(channels_out, 0u);
+ NN_RET_CHECK_GT(filterWidth, 0u);
+ NN_RET_CHECK_GT(filterHeight, 0u);
uint32_t outWidth = computeOutSizeTransposeConv(width, filterWidth, param.strideWidth,
param.paddingLeft, param.paddingRight);
uint32_t outHeight = computeOutSizeTransposeConv(height, filterHeight, param.strideHeight,
param.paddingTop, param.paddingBottom);
- NN_RET_CHECK_GT(outWidth, 0);
- NN_RET_CHECK_GT(outHeight, 0);
+ NN_RET_CHECK_GT(outWidth, 0u);
+ NN_RET_CHECK_GT(outHeight, 0u);
Shape output = context->getOutputShape(kOutputTensor);
output.type = input.type;
diff --git a/common/operations/UnidirectionalSequenceLSTM.cpp b/common/operations/UnidirectionalSequenceLSTM.cpp
index c6a83df..1865837 100644
--- a/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -199,7 +199,7 @@
const Shape inputShape = context->getInputShape(kInputTensor);
const uint32_t inputRank = getNumberOfDimensions(inputShape);
- NN_RET_CHECK_EQ(inputRank, 3) << "Invalid input tensor rank: " << inputRank;
+ NN_RET_CHECK_EQ(inputRank, 3u) << "Invalid input tensor rank: " << inputRank;
[[maybe_unused]] const uint32_t maxTime =
getSizeOfDimension(inputShape, isTimeMajor(context) ? 0 : 1);
@@ -207,44 +207,44 @@
const uint32_t inputSize = getSizeOfDimension(inputShape, inputRank - 1);
const Shape inputToOutputShape = context->getInputShape(kInputToOutputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToOutputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToOutputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToOutputShape, 1), inputSize);
const uint32_t numCells = getSizeOfDimension(inputToOutputShape, 0);
const Shape recurrentToOutputShape = context->getInputShape(kRecurrentToOutputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToOutputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToOutputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToOutputShape, 0), numCells);
const uint32_t outputSize = getSizeOfDimension(recurrentToOutputShape, 1);
if (hasTensor(context, kInputToInputWeightsTensor)) {
const Shape inputToInputShape = context->getInputShape(kInputToInputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToInputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToInputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToInputShape, 0), numCells);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToInputShape, 1), inputSize);
}
const Shape inputToForgetShape = context->getInputShape(kInputToForgetWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToForgetShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToForgetShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToForgetShape, 0), numCells);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToForgetShape, 1), inputSize);
const Shape inputToCellShape = context->getInputShape(kInputToCellWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputToCellShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputToCellShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToCellShape, 0), numCells);
NN_RET_CHECK_EQ(getSizeOfDimension(inputToCellShape, 1), inputSize);
if (hasTensor(context, kRecurrentToInputWeightsTensor)) {
const Shape recurrentToInputShape = context->getInputShape(kRecurrentToInputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToInputShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToInputShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToInputShape, 0), numCells);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToInputShape, 1), outputSize);
}
const Shape recurrentToForgetShape = context->getInputShape(kRecurrentToForgetWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToForgetShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToForgetShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToForgetShape, 0), numCells);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToForgetShape, 1), outputSize);
const Shape recurrentToCellShape = context->getInputShape(kRecurrentToCellWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToCellShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentToCellShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToCellShape, 0), numCells);
NN_RET_CHECK_EQ(getSizeOfDimension(recurrentToCellShape, 1), outputSize);
@@ -258,19 +258,19 @@
if (hasTensor(context, kCellToInputWeightsTensor)) {
const Shape cellToInputShape = context->getInputShape(kCellToInputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellToInputShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellToInputShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellToInputShape, 0), numCells);
}
if (hasTensor(context, kCellToForgetWeightsTensor)) {
const Shape cellToForgetShape = context->getInputShape(kCellToForgetWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellToForgetShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellToForgetShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellToForgetShape, 0), numCells);
}
if (hasTensor(context, kCellToOutputWeightsTensor)) {
const Shape cellToOutputShape = context->getInputShape(kCellToOutputWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellToOutputShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellToOutputShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellToOutputShape, 0), numCells);
}
@@ -288,7 +288,7 @@
if (!cifgUsed) {
NN_RET_CHECK(hasTensor(context, kInputGateBiasTensor));
const Shape inputGateBiasShape = context->getInputShape(kInputGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputGateBiasShape, 0), numCells);
} else {
NN_RET_CHECK(!hasTensor(context, kInputGateBiasTensor))
@@ -296,58 +296,58 @@
}
const Shape forgetGateBiasShape = context->getInputShape(kForgetGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(forgetGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(forgetGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(forgetGateBiasShape, 0), numCells);
const Shape cellGateBiasShape = context->getInputShape(kCellGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellGateBiasShape, 0), numCells);
const Shape outputGateBiasShape = context->getInputShape(kOutputGateBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(outputGateBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(outputGateBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(outputGateBiasShape, 0), numCells);
if (hasTensor(context, kProjectionWeightsTensor)) {
const Shape projectionShape = context->getInputShape(kProjectionWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(projectionShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(projectionShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(projectionShape, 0), outputSize);
NN_RET_CHECK_EQ(getSizeOfDimension(projectionShape, 1), numCells);
}
if (hasTensor(context, kProjectionBiasTensor)) {
const Shape projectionBiasShape = context->getInputShape(kProjectionBiasTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(projectionBiasShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(projectionBiasShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(projectionBiasShape, 0), outputSize);
}
const Shape outputStateShape = context->getInputShape(kOutputStateInTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(outputStateShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(outputStateShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(outputStateShape, 0), batchSize);
NN_RET_CHECK_EQ(getSizeOfDimension(outputStateShape, 1), outputSize);
const Shape cellStateShape = context->getInputShape(kCellStateInTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellStateShape), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellStateShape), 2u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellStateShape, 0), batchSize);
NN_RET_CHECK_EQ(getSizeOfDimension(cellStateShape, 1), numCells);
if (hasTensor(context, kInputLayerNormWeightsTensor)) {
const Shape inputLayerNormShape = context->getInputShape(kInputLayerNormWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(inputLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(inputLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(inputLayerNormShape, 0), numCells);
}
if (hasTensor(context, kForgetLayerNormWeightsTensor)) {
const Shape forgetLayerNormShape = context->getInputShape(kForgetLayerNormWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(forgetLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(forgetLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(forgetLayerNormShape, 0), numCells);
}
if (hasTensor(context, kCellLayerNormWeightsTensor)) {
const Shape cellLayerNormShape = context->getInputShape(kCellLayerNormWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(cellLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(cellLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(cellLayerNormShape, 0), numCells);
}
if (hasTensor(context, kOutputLayerNormWeightsTensor)) {
const Shape outputLayerNormShape = context->getInputShape(kOutputLayerNormWeightsTensor);
- NN_RET_CHECK_EQ(getNumberOfDimensions(outputLayerNormShape), 1);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(outputLayerNormShape), 1u);
NN_RET_CHECK_EQ(getSizeOfDimension(outputLayerNormShape, 0), numCells);
}
diff --git a/common/operations/UnidirectionalSequenceRNN.cpp b/common/operations/UnidirectionalSequenceRNN.cpp
index ce32e00..3c38bcf 100644
--- a/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/common/operations/UnidirectionalSequenceRNN.cpp
@@ -50,9 +50,9 @@
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
const uint32_t secondDimSize = getSizeOfDimension(inputShape, 1);
const uint32_t inputSize = getSizeOfDimension(inputShape, 2);
- for (int f = 0; f < firstDimSize; ++f) {
- for (int s = 0; s < secondDimSize; ++s) {
- for (int i = 0; i < inputSize; ++i) {
+ for (uint32_t f = 0; f < firstDimSize; ++f) {
+ for (uint32_t s = 0; s < secondDimSize; ++s) {
+ for (uint32_t i = 0; i < inputSize; ++i) {
const uint32_t inputIndex = f * secondDimSize * inputSize + s * inputSize + i;
const uint32_t outputIndex = s * firstDimSize * inputSize + f * inputSize + i;
output[outputIndex] = input[inputIndex];
@@ -104,7 +104,7 @@
fixedTimeInputShape.dimensions[0] = inputShape.dimensions[1];
fixedTimeInputShape.dimensions[1] = inputShape.dimensions[2];
- for (int i = 0; i < maxTime; ++i) {
+ for (uint32_t i = 0; i < maxTime; ++i) {
RNN::RNNStep<T>(input, fixedTimeInputShape, hiddenState, bias, weights, weightsShape,
recurrentWeights, recurrentWeightsShape, activation, output);
input += batchSize * inputSize;
@@ -166,11 +166,11 @@
const uint32_t numUnits = getSizeOfDimension(weights, 0);
const uint32_t inputSize = getSizeOfDimension(input, 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(input), 3);
- NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentWeights), 2);
- NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);
- NN_RET_CHECK_EQ(getNumberOfDimensions(hiddenState), 2);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(input), 3u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(recurrentWeights), 2u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1u);
+ NN_RET_CHECK_EQ(getNumberOfDimensions(hiddenState), 2u);
NN_RET_CHECK_EQ(inputSize, getSizeOfDimension(weights, 1));
NN_RET_CHECK_EQ(numUnits, getSizeOfDimension(bias, 0));
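
With the suppressions scoped to third-party headers, first-party code in these files is now checked for signed/unsigned mismatches. A quick standalone reproduction of the warning class, useful for checking compiler behavior (file name and flags are just an example):

    // sign_compare_repro.cpp
    #include <vector>

    int main() {
        std::vector<int> v{1, 2, 3};
        for (int i = 0; i < v.size(); ++i) {  // int vs size_t: comparison of
            (void)v[i];                       // integers of different signs
        }
        return 0;
    }

    // e.g.: clang++ -Wextra -Werror=sign-compare sign_compare_repro.cpp
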