From 1cb29aa8471f4bf78f53d5cbf0b9dee7fa6b0615 Mon Sep 17 00:00:00 2001
From: Ellen Norris-Thompson
Date: Thu, 11 Jul 2019 17:27:37 +0100
Subject: IVGCVSW-3305 Support Minimum in HAL1.2 Driver

* Adds conversion method to HAL1.2 Policy

Signed-off-by: Ellen Norris-Thompson
Change-Id: Ib282f32b2f5e899f1fb085bf615df5f984b6fd34
---
 1.2/HalPolicy.cpp    | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1.2/HalPolicy.hpp    |  2 ++
 NnapiSupport.txt     |  2 +-
 OutputShapeUtils.cpp |  6 ++++++
 OutputShapeUtils.hpp |  3 +++
 5 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 5b501dbc..7a52fcc4 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -144,6 +144,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertDepthwiseConv2d(operation, model, data);
         case V1_2::OperationType::MAXIMUM:
             return ConvertMaximum(operation, model, data);
+        case V1_2::OperationType::MINIMUM:
+            return ConvertMinimum(operation, model, data);
         case V1_2::OperationType::PAD_V2:
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
@@ -551,6 +553,56 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
                                                             armnn::Optional<armnn::TensorInfo>(outInfo));
 }
 
+bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicOutput(outputInfo))
+    {
+        ALOGD("Output shape not set, will infer from inputs");
+        outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
+                                                    input1.GetTensorInfo().GetShape()));
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsMinimumSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
+    assert(layer != nullptr);
+    BroadcastTensor(input0, input1, layer, *data.m_Network);
+
+    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
+                                                            0,
+                                                            *layer,
+                                                            model,
+                                                            data,
+                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
+}
+
 bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
 {
     LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index d0d988f3..18cf0359 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -37,6 +37,8 @@ private:
 
     static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index eabc534f..8c7a814c 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -51,6 +51,7 @@ The following AndroidNN HAL 1.2 operations are currently supported:
 CONV_2D (FLOAT32,QUANT8_ASYMM)
 DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
 MAXIMUM (FLOAT32,QUANT8_ASYMM)
+MINIMUM (FLOAT32,QUANT8_ASYMM)
 PRELU (FLOAT32,QUANT8_ASYMM)
 RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
 
@@ -69,7 +70,6 @@ The following AndroidNN HAL 1.2 operations are currently not supported:
 
 CONCATENATION
 LSTM
-MINIMUM
 PAD_V2
 QUANTIZE
 QUANTIZED_16BIT_LSTM
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index 6c936ee7..e3812a36 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -121,6 +121,12 @@ TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
     return CalculateMaxShape(input0Shape, input1Shape);
 }
 
+TensorShape InferMinimumOutputShape(const armnn::TensorShape& input0Shape,
+                                    const armnn::TensorShape& input1Shape)
+{
+    return CalculateMaxShape(input0Shape, input1Shape);
+}
+
 TensorShape InferPadOutputShape(const TensorShape& inputShape,
                                 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
 {
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index 2a832618..7452ced9 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -23,6 +23,9 @@ armnn::TensorShape InferDepthwiseConvolution2dOutputShape(const armnn::TensorSha
 
 armnn::TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
                                            const armnn::TensorShape& input1Shape);
 
+armnn::TensorShape InferMinimumOutputShape(const armnn::TensorShape& input0Shape,
+                                           const armnn::TensorShape& input1Shape);
+
 armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
                                        const std::vector<std::pair<unsigned int, unsigned int>>& padList);
-- 
cgit v1.2.1
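
A note on the shape inference in this patch: InferMinimumOutputShape delegates to the same CalculateMaxShape helper as InferMaximumOutputShape, because NNAPI's MINIMUM and MAXIMUM share identical broadcasting semantics: the two input shapes are right-aligned and each output dimension is the larger of the corresponding input dimensions. The sketch below illustrates that rule in isolation; CalculateMaxShapeSketch is a hypothetical stand-in written for this note, not the actual implementation of CalculateMaxShape in OutputShapeUtils.cpp.

// Minimal sketch of the broadcast shape rule behind
// InferMinimumOutputShape/InferMaximumOutputShape (hypothetical
// stand-in; the real helper is CalculateMaxShape in OutputShapeUtils.cpp).
#include <armnn/Tensor.hpp>
#include <algorithm>

armnn::TensorShape CalculateMaxShapeSketch(const armnn::TensorShape& in0,
                                           const armnn::TensorShape& in1)
{
    // Right-align the shorter shape against the longer one and take the
    // larger extent on each axis. This assumes the inputs are already
    // broadcast-compatible (each aligned pair of dims is equal or one is 1).
    const bool firstIsLonger = in0.GetNumDimensions() >= in1.GetNumDimensions();
    const armnn::TensorShape& longer  = firstIsLonger ? in0 : in1;
    const armnn::TensorShape& shorter = firstIsLonger ? in1 : in0;

    const unsigned int offset = longer.GetNumDimensions() - shorter.GetNumDimensions();

    armnn::TensorShape out = longer; // leading (unmatched) dims come from the longer shape
    for (unsigned int i = 0; i < shorter.GetNumDimensions(); ++i)
    {
        out[offset + i] = std::max(longer[offset + i], shorter[i]);
    }
    return out;
}

For example, inputs of shape [4, 1, 2] and [5, 4, 3, 1] would give an inferred output shape of [5, 4, 3, 2].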