author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-07-03 15:27:08 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>  2019-07-05 10:16:57 +0000
commit    7a6d11bf423bf7e67cc81074bb4745b0babf59c0 (patch)
tree      cead93bdf9aa90b87720b64a6f194639c8722697
parent    17ffff3f6708340695ca1433ed8b61955e15d7a5 (diff)
IVGCVSW-3295 Support ResizeNearestNeighbour in HAL1.2 driver
* Added ConvertResizeNearestNeighbor to 1.2/HalPolicy

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I9858849b70be36e7e5ced3dae667c85c03e50d4f
-rw-r--r--  1.2/HalPolicy.cpp   | 109
-rw-r--r--  1.2/HalPolicy.hpp   |   1
-rw-r--r--  ConversionUtils.hpp |  21
-rw-r--r--  NnapiSupport.txt    |   2
4 files changed, 132 insertions(+), 1 deletion(-)
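For context, the new ConvertResizeNearestNeighbor handles two operand conventions: INT32 operands give an explicit output width/height, while FLOAT32 operands give scale factors that are floored against the input's spatial dimensions. Below is a minimal standalone sketch of that mapping; the names ResizeTarget, TargetFromShape and TargetFromScale are illustrative only and not part of the driver.

// Illustrative only: mirrors the shape-vs-scale handling added in
// ConvertResizeNearestNeighbor in the diff below, without the driver's helpers.
#include <cmath>
#include <cstdint>
#include <stdexcept>

struct ResizeTarget
{
    uint32_t m_Width;
    uint32_t m_Height;
};

// Case 1: resizing by shape (INT32 operands give the output size directly).
ResizeTarget TargetFromShape(int32_t targetWidth, int32_t targetHeight)
{
    if (targetWidth < 0 || targetHeight < 0)
    {
        throw std::invalid_argument("Target width/height cannot be < 0");
    }
    return { static_cast<uint32_t>(targetWidth), static_cast<uint32_t>(targetHeight) };
}

// Case 2: resizing by scale (FLOAT32 operands scale the input's spatial dims).
ResizeTarget TargetFromScale(uint32_t inputWidth, uint32_t inputHeight,
                             float widthScale, float heightScale)
{
    return { static_cast<uint32_t>(std::floor(inputWidth  * widthScale)),
             static_cast<uint32_t>(std::floor(inputHeight * heightScale)) };
}

For example, TargetFromScale(16, 16, 2.0f, 2.0f) yields a 32x32 target, which is the value the converter writes into the armnn::ResizeDescriptor target width/height.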
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 4e638cf4..99cc9802 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -8,6 +8,10 @@
#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"
+#include <DataLayoutIndexed.hpp>
+
+#include <cmath>
+
namespace armnn_driver
{
namespace hal_1_2
@@ -138,6 +142,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertDepthwiseConv2d(operation, model, data);
case V1_2::OperationType::PRELU:
return ConvertPrelu(operation, model, data);
+ case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
+ return ConvertResizeNearestNeighbor(operation, model, data);
default:
return Fail("%s: Operation type %s not supported in ArmnnDriver",
__func__, toString(operation.type).c_str());
@@ -466,5 +472,108 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
+bool HalPolicy::ConvertResizeNearestNeighbor(const Operation& operation, const Model& model, ConversionData& data)
+{
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+ if (!input.IsValid())
+ {
+ return Fail("%s: Could not read input 0", __func__);
+ }
+
+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ armnn::ResizeDescriptor descriptor;
+ descriptor.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
+
+ OperandType operandType1;
+ OperandType operandType2;
+
+ if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
+ !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ if (operandType1 != operandType2)
+ {
+ return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
+ }
+
+ if (operandType1 == OperandType::INT32)
+ {
+ // Case 1: resizing by shape
+ int32_t targetWidth = 0;
+ int32_t targetHeight = 0;
+
+ if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
+ !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
+ {
+ return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
+ }
+
+ if (targetWidth < 0 || targetHeight < 0)
+ {
+ return Fail("%s: Operation has invalid inputs for resizing by shape. "
+ "Target width/height cannot be < 0", __func__);
+ }
+
+ descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
+ descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
+ }
+ else if (operandType1 == OperandType::FLOAT32)
+ {
+ // Case 2: resizing by scale
+ float widthScale = 1.0f;
+ float heightScale = 1.0f;
+
+ if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
+ !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
+ {
+ return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
+ }
+
+ const armnn::TensorShape& inputShape = inputInfo.GetShape();
+ armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
+
+ float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
+ float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
+
+ descriptor.m_TargetWidth = static_cast<uint32_t>(std::floor(width * widthScale));
+ descriptor.m_TargetHeight = static_cast<uint32_t>(std::floor(height * heightScale));
+ }
+ else
+ {
+ // NOTE: FLOAT16 scales are not supported
+ return false;
+ }
+
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsResizeSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
+
+ assert(layer != nullptr);
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+ input.Connect(layer->GetInputSlot(0));
+
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+}
+
} // namespace hal_1_2
} // namespace armnn_driver
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 0966145e..762b1064 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -32,6 +32,7 @@ private:
static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertResizeNearestNeighbor(const Operation& operation, const Model& model, ConversionData& data);
};
} // namespace hal_1_2
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 03f46696..36bc4ae1 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -582,6 +582,27 @@ const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
}
template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType>
+bool GetOperandType(const HalOperation& operation,
+ uint32_t inputIndex,
+ const HalModel& model,
+ HalOperandType& type)
+{
+ using HalOperand = typename HalPolicy::Operand;
+
+ const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
+ if (!operand)
+ {
+ return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
+ }
+
+ type = operand->type;
+ return true;
+}
+
+template<typename HalPolicy,
typename HalOperand = typename HalPolicy::Operand,
typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index b4b2cc45..d412c086 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -49,6 +49,7 @@ The following AndroidNN HAL 1.2 operations are currently supported:
CONV_2D (FLOAT32,QUANT8_ASYMM)
DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
+RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
--- Unsupported operators ---
@@ -72,7 +73,6 @@ PAD_V2
QUANTIZE
QUANTIZED_16BIT_LSTM
PRELU
-RESIZE_NEAREST_NEIGHBOR
TRANSPOSE_CONV_2D
Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework