author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2019-11-27 14:48:32 +0000
committer  Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2019-11-27 15:53:04 +0000
commit     710f664e41803bcb04403a1da27990d76faab6cf (patch)
tree       9ede03e7fcb322c4df1c6ccf0c46f1913844c9ac
parent     899f64f711fdac95d88795f4e88f7770c0cb94be (diff)
download   armnn-710f664e41803bcb04403a1da27990d76faab6cf.tar.gz
IVGCVSW-4148 Extend reporting of quant multiplier > 1 as unsupported on ACL to per-axis case
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I66a8360b6d86e95325dee58927dcbe62ccf6ad58
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.cpp  27
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.hpp   4
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                25
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp            25
4 files changed, 55 insertions, 26 deletions
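
For readers skimming the change: the condition being generalised here is the requantization multiplier input_scale * weight_scale / output_scale, which the targeted ACL kernels only accept when it is at most 1.0. With per-axis (per-channel) weight quantization there is one weight scale, and therefore one multiplier, per channel, and every one of them has to satisfy the bound. The standalone sketch below is illustrative only; it is not Arm NN code, and the names AllMultipliersSupported and weightScales are made up for the example.

#include <iostream>
#include <vector>

// Illustrative helper (not part of Arm NN): returns false if any per-channel
// requantization multiplier inputScale * weightScale / outputScale exceeds 1.0f.
bool AllMultipliersSupported(float inputScale,
                             const std::vector<float>& weightScales,
                             float outputScale)
{
    for (float weightScale : weightScales)
    {
        if ((inputScale * weightScale) / outputScale > 1.0f)
        {
            return false;
        }
    }
    return true;
}

int main()
{
    // Input scale 0.5, output scale 0.25, two per-channel weight scales.
    // The per-channel multipliers are 0.5 * {0.4, 0.6} / 0.25 = {0.8, 1.2};
    // 1.2 exceeds 1.0, so this configuration would be reported as unsupported.
    std::vector<float> weightScales = { 0.4f, 0.6f };
    std::cout << std::boolalpha
              << AllMultipliersSupported(0.5f, weightScales, 0.25f) << std::endl; // prints: false
    return 0;
}

The helper added below in ArmComputeTensorUtils.cpp implements this per-axis walk, falling back to the single-scale check when the weights are not per-axis quantized, which is what allows the unconditional per-axis early-outs in the Cl and Neon depthwise checks to be removed.
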
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index e4fdb21ecc..328a083ae9 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -232,5 +232,32 @@ arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelVa
}
}
+bool IsQuantMultiplierSupported(const TensorInfo& input,
+                                const TensorInfo& output,
+                                const TensorInfo& weights)
+{
+    constexpr float maxQuantMultiplier = 1.0f;
+    if (weights.HasMultipleQuantizationScales())
+    {
+        for (float weightScale : weights.GetQuantizationScales())
+        {
+            if ((input.GetQuantizationScale() * weightScale) / output.GetQuantizationScale() > maxQuantMultiplier)
+            {
+                return false;
+            }
+        }
+    }
+    else
+    {
+        if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) /
+            output.GetQuantizationScale() > maxQuantMultiplier)
+        {
+            return false;
+        }
+    }
+
+    return true;
+}
+
} // namespace armcomputetensorutils
} // namespace armnn
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index ef837d84d8..3fc6818b0d 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -243,5 +243,9 @@ inline TensorShape GetShape(const arm_compute::TensorShape& shape)
return GetTensorShape(shape, 1U);
}
+bool IsQuantMultiplierSupported(const TensorInfo& input,
+                                const TensorInfo& output,
+                                const TensorInfo& weights);
+
} // namespace armcomputetensorutils
} // namespace armnn
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 9a5c38381c..49312d6ea5 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -16,6 +16,7 @@
#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
#include "workloads/ClAbsWorkload.hpp"
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
@@ -144,6 +145,13 @@ bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
std::forward<Params>(params)...);
}
+#if defined(ARMCOMPUTECL_ENABLED)
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) \
+armcomputetensorutils::IsQuantMultiplierSupported(input, output, weights)
+#else
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) true
+#endif
+
} // anonymous namespace
bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
@@ -324,8 +332,7 @@ bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
@@ -368,13 +375,7 @@ bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    if (weights.HasPerAxisQuantization())
-    {
-        return false;
-    }
-
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
@@ -395,8 +396,7 @@ bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& in
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
@@ -814,8 +814,7 @@ bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 4474b12d37..20b655098d 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -18,6 +18,7 @@
#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
#include "workloads/NeonAbsWorkload.hpp"
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
@@ -112,6 +113,13 @@ inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfU
return IsNeonBackendSupported(reasonIfUnsupported);
#endif
+#if defined(ARMCOMPUTENEON_ENABLED)
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) \
+armcomputetensorutils::IsQuantMultiplierSupported(input, output, weights)
+#else
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) true
+#endif
+
} // anonymous namespace
bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
@@ -274,8 +282,7 @@ bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
@@ -308,13 +315,7 @@ bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    if (weights.HasPerAxisQuantization())
-    {
-        return false;
-    }
-
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
@@ -345,8 +346,7 @@ bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo&
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }
@@ -751,8 +751,7 @@ bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
    {
        return false;
    }