author    Mike Kelly <mike.kelly@arm.com>    2019-07-25 09:26:06 +0100
committer Mike Kelly <mike.kelly@arm.com>    2019-07-25 09:26:06 +0100
commit    3c673949b4ed3ab3129859b18439ed8fe87a6ad1 (patch)
tree      0c5a47c6dbf9d03e7cdfdf10c72b8d3696bea59b
parent    29404fb3b16b301d630f492a2b89b9eb39b67e63 (diff)
IVGCVSW-3521 CpuAcc V1.2 pad Failures
* Fixed Pad and PadV2 failures and skips.
* Templated ConvertPad to enable float16 tests to run.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I50ded84fe44ea5d5949e877f383f32adff88680d
-rw-r--r--  1.1/HalPolicy.cpp    60
-rw-r--r--  1.1/HalPolicy.hpp     1
-rw-r--r--  1.2/HalPolicy.cpp    12
-rw-r--r--  ConversionUtils.hpp  70
-rw-r--r--  Utils.cpp             7
5 files changed, 83 insertions, 67 deletions
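
The central move in this commit is replacing the per-policy ConvertPad methods with a single template in ConversionUtils.hpp, instantiated as ConvertPad<hal_1_1::HalPolicy> or ConvertPad<hal_1_2::HalPolicy> at each call site. A standalone sketch of that policy-template dispatch, with toy types standing in for the driver's real HAL bindings:

    // Toy sketch of the policy-template pattern; these structs only stand in
    // for the driver's HalPolicy classes, which expose the same typedefs.
    #include <iostream>

    namespace v1_1 { struct Operation { const char* name = "V1_1::PAD"; }; struct Model {}; }
    namespace v1_2 { struct Operation { const char* name = "V1_2::PAD"; }; struct Model {}; }

    struct Policy_1_1 { using Operation = v1_1::Operation; using Model = v1_1::Model; };
    struct Policy_1_2 { using Operation = v1_2::Operation; using Model = v1_2::Model; };

    // One implementation serves every HAL version; the policy binds the types.
    template<typename HalPolicy,
             typename HalOperation = typename HalPolicy::Operation,
             typename HalModel     = typename HalPolicy::Model>
    bool ConvertPad(const HalOperation& operation, const HalModel& /*model*/)
    {
        std::cout << "converting " << operation.name << "\n";
        return true;
    }

    int main()
    {
        ConvertPad<Policy_1_1>(v1_1::Operation{}, v1_1::Model{});
        ConvertPad<Policy_1_2>(v1_2::Operation{}, v1_2::Model{});
    }

Because the HAL types are resolved at compile time, the same function body serves V1_1 and V1_2 operations, which is what lets the float16 (V1_2) pad tests exercise the shared path.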
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index c5df72a7..b58cda48 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -86,7 +86,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_1::OperationType::MEAN:
return ConvertMean(operation, model, data);
case V1_1::OperationType::PAD:
- return ConvertPad(operation, model, data);
+ return ConvertPad<hal_1_1::HalPolicy>(operation, model, data);
case V1_1::OperationType::SPACE_TO_BATCH_ND:
return ConvertSpaceToBatchNd(operation, model, data);
case V1_1::OperationType::SQUEEZE:
@@ -296,64 +296,6 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
-bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_1::HalPolicy::ConvertPad()");
-
- LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
- if (!input.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
- unsigned int rank = inputInfo.GetNumDimensions();
-
- armnn::PadDescriptor descriptor;
- if (!ConvertPaddings<hal_1_1::HalPolicy>(operation, model, data, rank, descriptor))
- {
- return Fail("%s: Could not convert paddings", __func__);
- }
-
- const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output", __func__);
- }
-
- armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicTensor(outputInfo))
- {
- ALOGD("Output shape not set, will infer from inputs");
- outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
- }
-
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsPadSupported,
- data.m_Backends,
- isSupported,
- inputInfo,
- outputInfo,
- descriptor);
- if (!isSupported)
- {
- return false;
- }
-
- armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
- assert(layer != nullptr);
- input.Connect(layer->GetInputSlot(0));
- layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
- return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation,
- 0,
- *layer,
- model,
- data,
- armnn::Optional<armnn::TensorInfo>(outputInfo));
-}
-
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_1::HalPolicy::ConvertSpaceToBatchNd()");
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index dd8558b3..827fddde 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -31,7 +31,6 @@ private:
static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 906d6bcf..307475a8 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -68,7 +68,6 @@ bool HandledByV1_1(V1_2::OperationType operationType)
case V1_1::OperationType::BATCH_TO_SPACE_ND:
case V1_1::OperationType::DIV:
case V1_1::OperationType::MEAN:
- case V1_1::OperationType::PAD:
case V1_1::OperationType::SPACE_TO_BATCH_ND:
case V1_1::OperationType::SQUEEZE:
case V1_1::OperationType::STRIDED_SLICE:
@@ -146,6 +145,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertMaximum(operation, model, data);
case V1_2::OperationType::MINIMUM:
return ConvertMinimum(operation, model, data);
+ case V1_2::OperationType::PAD:
+ return ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
case V1_2::OperationType::PAD_V2:
return ConvertPadV2(operation, model, data);
case V1_2::OperationType::PRELU:
@@ -675,15 +676,12 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
}
else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
{
- int32_t quantizedPadValue = 0;
- if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
+ int32_t intPadValue = 0;
+ if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
{
return Fail("%s: Could not read input 2 (INT32)", __func__);
}
-
- descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
- inputInfo.GetQuantizationScale(),
- inputInfo.GetQuantizationOffset());
+ descriptor.m_PadValue = intPadValue;
}
else
{
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 5ebec6b3..fa686a6f 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1651,4 +1651,74 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
armnn::Optional<armnn::TensorInfo>(outputInfo));
}
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalModel = typename HalPolicy::Model>
+bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+ ALOGV("hal_1_1::HalPolicy::ConvertPad()");
+
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+ if (!input.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ unsigned int rank = inputInfo.GetNumDimensions();
+
+ armnn::PadDescriptor descriptor;
+ if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
+ {
+ return Fail("%s: Could not convert paddings", __func__);
+ }
+
+ // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q, the pad
+ // value must be "logical zero", so we set it equal to the QuantizationOffset; effectively the padding ends up as
+ // (QuantizationOffset - QuantizationOffset) * scale = 0.
+ if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+ {
+ descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+ }
+
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output", __func__);
+ }
+
+ armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ ALOGD("Output shape not set, will infer from inputs");
+ outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+ }
+
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsPadSupported,
+ data.m_Backends,
+ isSupported,
+ inputInfo,
+ outputInfo,
+ descriptor);
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+ assert(layer != nullptr);
+ input.Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+ 0,
+ *layer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outputInfo));
+}
+
} // namespace armnn_driver
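
The "logical zero" comment in the new ConvertPad is easy to verify: whatever the scale, a pad value equal to the quantization offset dequantizes to exactly zero. A minimal check with invented quantization parameters:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Illustrative parameters for a QuantisedAsymm8 tensor.
        const float   scale  = 0.125f;
        const int32_t offset = 42;

        // ConvertPad sets the pad value to the offset itself, so:
        const int32_t padValue    = offset;
        const float   dequantized = (padValue - offset) * scale;

        assert(dequantized == 0.0f); // (offset - offset) * scale == 0
    }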
diff --git a/Utils.cpp b/Utils.cpp
index d3d62a02..43b65ee3 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -7,6 +7,7 @@
#include "Utils.hpp"
+#include <Half.hpp>
#include <Permute.hpp>
#include <cassert>
@@ -42,6 +43,9 @@ void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void
switch(tensor.GetDataType())
{
+ case armnn::DataType::Float16:
+ SwizzleAndroidNn4dTensorToArmNn<armnn::Half>(tensor.GetShape(), input, output, mappings);
+ break;
case armnn::DataType::Float32:
SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings);
break;
@@ -112,6 +116,9 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
case V1_2::OperandType::TENSOR_FLOAT32:
type = armnn::DataType::Float32;
break;
+ case V1_2::OperandType::TENSOR_FLOAT16:
+ type = armnn::DataType::Float16;
+ break;
case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
type = armnn::DataType::QuantisedAsymm8;
break;
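
Finally, the Utils.cpp hunks route armnn::DataType::Float16 through the same switch-to-template dispatch that Float32 and the quantized types already use, which is what the new <Half.hpp> include supports. A toy sketch of the pattern (the Half struct below is a stand-in for armnn::Half, and the permutation itself is elided):

    #include <cstddef>
    #include <cstdint>

    // Stand-in for armnn::Half; the driver gets the real 16-bit type from <Half.hpp>.
    struct Half { uint16_t bits; };

    enum class DataType { Float16, Float32, QuantisedAsymm8 };

    // One typed implementation; the element type fixes the stride used when
    // walking raw tensor memory (shown here as a plain copy).
    template<typename T>
    void Swizzle4d(const void* input, void* output, std::size_t elements)
    {
        auto src = static_cast<const T*>(input);
        auto dst = static_cast<T*>(output);
        for (std::size_t i = 0; i < elements; ++i) { dst[i] = src[i]; }
    }

    // The switch added in Utils.cpp picks the instantiation from the tensor's type.
    void Swizzle(DataType type, const void* in, void* out, std::size_t n)
    {
        switch (type)
        {
            case DataType::Float16:         Swizzle4d<Half>(in, out, n);    break;
            case DataType::Float32:         Swizzle4d<float>(in, out, n);   break;
            case DataType::QuantisedAsymm8: Swizzle4d<uint8_t>(in, out, n); break;
        }
    }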