author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-07-05 15:47:07 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>    2019-07-05 15:40:00 +0000
commit    cb8ac845da5a6297f7009e26df282a2bfd73f3b9 (patch)
tree      eec2028892a95882412ff3cd869771c7a8f46f43
parent    f4a7c7d7feb3a6f0ac91268978d047cb556329fd (diff)
download  android-nn-driver-cb8ac845da5a6297f7009e26df282a2bfd73f3b9.tar.gz
IVGCVSW-3301 Support PAD_V2 in HAL1.2 driver
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: If3da7b9015fb816bb27b85ab491102005859b3b0
-rw-r--r--  1.1/HalPolicy.cpp    33
-rw-r--r--  1.2/HalPolicy.cpp    91
-rw-r--r--  1.2/HalPolicy.hpp     2
-rw-r--r--  ConversionUtils.hpp  46
4 files changed, 142 insertions(+), 30 deletions(-)
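For reference, PAD_V2 pads an input tensor with a constant value: input 1 is a [rank, 2] paddings tensor and input 2 is the scalar pad value. The new ConvertPadV2 path in the diff below maps this onto Arm NN's existing pad layer by filling a PadDescriptor (m_PadList from the paddings tensor, m_PadValue from the scalar, dequantized for QUANT8_ASYMM inputs). What follows is a minimal illustrative sketch of the equivalent network construction against the Arm NN public API; the function name, padding values and layer name are made up for the example and are not part of this patch.

#include <armnn/ArmNN.hpp>

// Sketch only: builds the same kind of pad layer that ConvertPadV2 produces.
armnn::IConnectableLayer* AddExamplePadV2(armnn::INetwork& network,
                                          armnn::IConnectableLayer& previousLayer,
                                          const armnn::TensorInfo& outputInfo)
{
    armnn::PadDescriptor descriptor;

    // Paddings tensor [[1, 1], [2, 2]]: one (before, after) pair per input dimension.
    descriptor.m_PadList = { { 1u, 1u }, { 2u, 2u } };

    // Scalar pad value (input 2 of PAD_V2); for QUANT8_ASYMM inputs the driver
    // dequantizes the INT32 value before storing it here.
    descriptor.m_PadValue = 0.0f;

    armnn::IConnectableLayer* padLayer = network.AddPadLayer(descriptor, "pad_v2_example");
    previousLayer.GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return padLayer;
}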
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index fc4c7a2c..0f00910f 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -270,48 +270,24 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-
if (!input.IsValid())
{
return Fail("%s: Operation has invalid inputs", __func__);
}
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-
- const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-
- if (!paddingsOperand)
- {
- return Fail("%s: Could not read paddings operand", __func__);
- }
-
unsigned int rank = inputInfo.GetNumDimensions();
- armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
- if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
- {
- return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
- }
-
- std::vector<int32_t> paddings;
- GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
- // add padding for each dimension of input tensor.
armnn::PadDescriptor descriptor;
- for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+ if (!ConvertPaddings<hal_1_1::HalPolicy>(operation, model, data, rank, descriptor))
{
- int paddingBeforeInput = paddings[i];
- int paddingAfterInput = paddings[i + 1];
- if (paddingBeforeInput < 0 || paddingAfterInput < 0)
- {
- return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
- }
- descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+ return Fail("%s: Could not convert paddings", __func__);
}
const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
if (!output)
{
- return Fail("%s: Could not read output 0", __func__);
+ return Fail("%s: Could not read output", __func__);
}
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
@@ -724,6 +700,5 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
-
} // namespace hal_1_1
-} // namespace armnn_driver
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index e058e026..836977da 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -9,6 +9,7 @@
#include "../1.1/HalPolicy.hpp"
#include <DataLayoutIndexed.hpp>
+#include <Half.hpp>
#include <cmath>
@@ -139,6 +140,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertConv2d(operation, model, data);
case V1_2::OperationType::DEPTHWISE_CONV_2D:
return ConvertDepthwiseConv2d(operation, model, data);
+ case V1_2::OperationType::PAD_V2:
+ return ConvertPadV2(operation, model, data);
case V1_2::OperationType::PRELU:
return ConvertPrelu(operation, model, data);
case V1_2::OperationType::RESIZE_BILINEAR:
@@ -429,6 +432,94 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
+bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
+{
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+ if (!input.IsValid())
+ {
+ return Fail("%s: Could not read input 0", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ unsigned int rank = inputInfo.GetNumDimensions();
+
+ armnn::PadDescriptor descriptor;
+ if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
+ {
+ return Fail("%s: Could not convert paddings", __func__);
+ }
+
+ // Determine type of padding value
+ OperandType operandType0;
+ OperandType operandType2;
+
+ if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
+ !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ // Read value to use for padding
+ if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
+ {
+ armnn::Half f16PadValue;
+ if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
+ {
+ return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
+ }
+
+ descriptor.m_PadValue = f16PadValue;
+ }
+ else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
+ {
+ if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
+ {
+ return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
+ }
+ }
+ else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
+ {
+ int32_t quantizedPadValue = 0;
+ if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
+ {
+ return Fail("%s: Could not read input 2 (INT32)", __func__);
+ }
+
+ descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
+ inputInfo.GetQuantizationScale(),
+ inputInfo.GetQuantizationOffset());
+ }
+ else
+ {
+ return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
+ }
+
+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsPadSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+ assert(layer != nullptr);
+ input.Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+}
+
bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index a7bef247..9d6dd497 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,8 @@ private:
static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertResize(const Operation& operation,
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 9a711cb7..d30b8a4e 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -5,11 +5,12 @@
#pragma once
+#include "Utils.hpp"
+
#include <armnn/ArmNN.hpp>
#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
#include "armnn/src/armnnUtils/Permute.hpp"
-#include "Utils.hpp"
#include <ActivationFunctor.h>
#include <CpuExecutor.h>
@@ -1139,6 +1140,49 @@ bool ConvertToActivation(const HalOperation& operation,
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
+bool ConvertPaddings(const HalOperation& operation,
+ const HalModel& model,
+ ConversionData& data,
+ unsigned int rank,
+ armnn::PadDescriptor& padDescriptor)
+{
+ using HalOperand = typename HalPolicy::Operand;
+
+ const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+ if (!paddingsOperand)
+ {
+ return Fail("%s: Could not read paddings operand", __func__);
+ }
+
+ armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
+ if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
+ {
+ return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
+ }
+
+ std::vector<int32_t> paddings;
+ GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
+
+ // add padding for each dimension of input tensor.
+ for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+ {
+ int paddingBeforeInput = paddings[i];
+ int paddingAfterInput = paddings[i + 1];
+
+ if (paddingBeforeInput < 0 || paddingAfterInput < 0)
+ {
+ return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
+ }
+
+ padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+ }
+
+ return true;
+}
+
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
const char* operationName,
armnn::PoolingAlgorithm poolType,