IVGCVSW-3482 Report operations with dynamic output size as unsupported
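
Add an IsDynamicOutput() helper to OutputShapeUtils and use it in the
1.0, 1.1 and 1.2 HalPolicies to reject outputs whose shape is not fully
specified at model preparation time (such operands carry a tensor info
with zero elements). PRELU keeps its existing behaviour of inferring
the output shape from its inputs instead.
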
Signed-off-by: Aron Virginas-Tar <[email protected]>
Change-Id: Ifafe2a6fbfd6019b3395d51ed9967db794d2b034
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 9673a74..2149d40 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -8,6 +8,7 @@
#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"
+#include "OutputShapeUtils.hpp"

namespace armnn_driver
{
@@ -388,11 +389,17 @@
return Fail("%s: Operation has invalid outputs", __func__);
}

+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }
+
if (!IsLayerSupportedForAnyBackend(__func__,
armnn::IsDequantizeSupported,
data.m_Backends,
input.GetTensorInfo(),
- GetTensorInfoForOperand(*outputOperand)))
+ outputInfo))
{
return false;
}
@@ -957,6 +964,11 @@
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }
+
armnn::L2NormalizationDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NHWC;

@@ -1082,7 +1094,11 @@
return Fail("%s: Operation has no outputs", __func__);
}

- const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }

armnn::SoftmaxDescriptor desc;
if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
@@ -1094,7 +1110,7 @@
armnn::IsSoftmaxSupported,
data.m_Backends,
input.GetTensorInfo(),
- outInfo,
+ outputInfo,
desc))
{
return false;
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 78f157d..dbd380a 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -5,6 +5,8 @@
#include "HalPolicy.hpp"
+#include "OutputShapeUtils.hpp"
+
#include "../1.0/HalPolicy.hpp"
namespace
@@ -176,20 +178,24 @@
return false;
}

- const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }

if (!IsLayerSupportedForAnyBackend(__func__,
armnn::IsSubtractionSupported,
data.m_Backends,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
- outInfo))
+ outputInfo))
{
return false;
}

armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
- armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+ armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
@@ -292,6 +298,10 @@
}

const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }

if (!IsLayerSupportedForAnyBackend(__func__,
armnn::IsPadSupported,
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index b194a57..58fcf73 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -173,6 +173,11 @@
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }
+
// ArmNN does not currently support non-fixed weights or bias
const ConstTensorPin weightsPin =
ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
@@ -442,6 +447,18 @@
return Fail("%s: Could not read input 0", __func__);
}

+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }
+
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
unsigned int rank = inputInfo.GetNumDimensions();

@@ -496,14 +513,6 @@
return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
}

- const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
if (!IsLayerSupportedForAnyBackend(__func__,
armnn::IsPadSupported,
data.m_Backends,
@@ -543,7 +552,7 @@
const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();

armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
- if (outputInfo.GetNumElements() == 0u)
+ if (IsDynamicOutput(outputInfo))
{
ALOGD("Output shape not set, will infer from inputs");
outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
diff --git a/Android.mk b/Android.mk
index 215b0a8..9bbee43 100644
--- a/Android.mk
+++ b/Android.mk
@@ -114,6 +114,7 @@
ArmnnDevice.cpp \
ArmnnPreparedModel.cpp \
ModelToINetworkConverter.cpp \
+ OutputShapeUtils.cpp \
RequestThread.cpp \
Utils.cpp \
ConversionUtils.cpp
@@ -227,6 +228,7 @@
ArmnnDevice.cpp \
ArmnnPreparedModel.cpp \
ModelToINetworkConverter.cpp \
+ OutputShapeUtils.cpp \
RequestThread.cpp \
Utils.cpp \
ConversionUtils.cpp
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index de27630..a0c624c 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -12,6 +12,11 @@

using namespace armnn;

+bool IsDynamicOutput(const TensorInfo& outputInfo)
+{
+ return outputInfo.GetNumElements() == 0u;
+}
+
TensorShape InferPreluOutputShape(const TensorShape& inputShape, const TensorShape& alphaShape)
{
// NOTE: The inferred PReLU output size will be the maximum size along each dimension
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index f314252..6e2a76d 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -10,6 +10,8 @@
namespace armnn_driver
{

+bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
+
armnn::TensorShape InferPreluOutputShape(const armnn::TensorShape& inputShape, const armnn::TensorShape& alphaShape);

} // namespace armnn_driver
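
For reference, every converter touched above applies the same guard. The
sketch below is illustrative only: ConvertExample and its signature are
hypothetical stand-ins for the converter plumbing, while Fail(),
GetOutputOperand, GetTensorInfoForOperand and IsDynamicOutput() are the
helpers used in the patch itself.

// Hypothetical converter showing the dynamic-output guard pattern.
bool ConvertExample(const Operation& operation, const Model& model, ConversionData& data)
{
    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    // Operands with an unspecified shape carry zero elements, which is
    // exactly what IsDynamicOutput() checks for.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicOutput(outputInfo))
    {
        return Fail("%s: Dynamic output not supported", __func__);
    }

    // Backend support checks and layer creation would follow as usual.
    return true;
}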