From 95b1ef62568cc71b4bcdde6af02b26687d020a1e Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Date: Mon, 15 Jul 2019 12:02:20 +0100
Subject: IVGCVSW-3306 Add Maximum support in HAL 1.2 driver

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I94e410058ffc1ce39465498c12e10cb5669e7ed3
---
 1.2/HalPolicy.cpp    | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 1.2/HalPolicy.hpp    |  2 ++
 NnapiSupport.txt     |  2 +-
 OutputShapeUtils.cpp |  6 ++++++
 OutputShapeUtils.hpp |  3 +++
 5 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index d7452c68..9ccac9b4 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -142,6 +142,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertConv2d(operation, model, data);
         case V1_2::OperationType::DEPTHWISE_CONV_2D:
             return ConvertDepthwiseConv2d(operation, model, data);
+        case V1_2::OperationType::MAXIMUM:
+            return ConvertMaximum(operation, model, data);
         case V1_2::OperationType::PAD_V2:
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
@@ -456,6 +458,51 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
 }
 
+bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outInfo))
+    {
+        ALOGD("Output shape not set, will infer from inputs");
+        outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(),
+                                                 input1.GetTensorInfo().GetShape()));
+    }
+
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsMaximumSupported,
+                                       data.m_Backends,
+                                       input0.GetTensorInfo(),
+                                       input1.GetTensorInfo(),
+                                       outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+    assert(layer != nullptr);
+    BroadcastTensor(input0, input1, layer, *data.m_Network);
+
+    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
+                                                            0,
+                                                            *layer,
+                                                            model,
+                                                            data,
+                                                            armnn::Optional<armnn::TensorInfo>(outInfo));
+}
+
 bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
 {
     LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 25aee820..d0d988f3 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,8 @@ private:
 
     static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 462b48e6..eabc534f 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -50,6 +50,7 @@ The following AndroidNN HAL 1.2 operations are currently supported:
 
 CONV_2D (FLOAT32,QUANT8_ASYMM)
 DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
+MAXIMUM (FLOAT32,QUANT8_ASYMM)
 PRELU (FLOAT32,QUANT8_ASYMM)
 RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
 
@@ -69,7 +70,6 @@ The following AndroidNN HAL 1.2 operations are currently not supported:
 CONCATENATION
 LSTM
 MINIMUM
-MAXIMUM
 PAD_V2
 QUANTIZE
 QUANTIZED_16BIT_LSTM
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index 739038ac..b6cdb316 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -54,6 +54,12 @@ bool IsDynamicOutput(const TensorInfo& outputInfo)
     return outputInfo.GetNumElements() == 0u;
 }
 
+TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
+                                    const armnn::TensorShape& input1Shape)
+{
+    return CalculateMaxShape(input0Shape, input1Shape);
+}
+
 TensorShape InferPadOutputShape(const TensorShape& inputShape,
                                 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
 {
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index be255c28..dac4a193 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -12,6 +12,9 @@ namespace armnn_driver
 
 bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
 
+armnn::TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
+                                           const armnn::TensorShape& input1Shape);
+
 armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
                                        const std::vector<std::pair<unsigned int, unsigned int>>& padList);
 
-- 
cgit v1.2.1
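
Note on the dynamic-output path above: MAXIMUM is an element-wise binary
operation with NumPy-style broadcasting, so when the model leaves the output
shape unset (IsDynamicOutput, i.e. the output TensorInfo has zero elements),
the driver derives the shape from the two input shapes via CalculateMaxShape.
That helper is not part of this patch; the standalone sketch below illustrates
the per-dimension rule it is assumed to apply. The name InferBroadcastShape
and the use of std::vector instead of armnn::TensorShape are illustrative
only.

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch of NumPy-style broadcast shape inference: align the two shapes at
// their trailing dimensions and take the larger extent per dimension
// (an extent of 1 broadcasts against any size).
std::vector<unsigned int> InferBroadcastShape(const std::vector<unsigned int>& shape0,
                                              const std::vector<unsigned int>& shape1)
{
    const std::size_t outRank = std::max(shape0.size(), shape1.size());
    std::vector<unsigned int> outShape(outRank, 1u);

    for (std::size_t i = 0; i < outRank; ++i)
    {
        // Walk right-to-left; a shape shorter than outRank contributes
        // implicit leading 1s.
        const unsigned int dim0 = i < shape0.size() ? shape0[shape0.size() - 1 - i] : 1u;
        const unsigned int dim1 = i < shape1.size() ? shape1[shape1.size() - 1 - i] : 1u;
        outShape[outRank - 1 - i] = std::max(dim0, dim1);
    }
    return outShape;
}

// Example: InferBroadcastShape({4, 1, 3}, {5, 3}) yields {4, 5, 3}.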
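
For reference, this conversion is exercised when an NNAPI model contains the
ANEURALNETWORKS_MAXIMUM operation (introduced in NNAPI 1.2 / Android API
level 29). Below is a minimal sketch of how an application would reach this
code path through the NeuralNetworks C API; the operand shapes, indices, and
the BuildMaximumModel name are arbitrary, and error handling is elided.

#include <android/NeuralNetworks.h>

// Build a one-operation model computing out = max(in0, in1) on 2x2 FLOAT32
// tensors.
ANeuralNetworksModel* BuildMaximumModel()
{
    ANeuralNetworksModel* model = nullptr;
    ANeuralNetworksModel_create(&model);

    uint32_t dims[2] = {2, 2};
    ANeuralNetworksOperandType tensorType = {};
    tensorType.type           = ANEURALNETWORKS_TENSOR_FLOAT32;
    tensorType.dimensionCount = 2;
    tensorType.dimensions     = dims;

    ANeuralNetworksModel_addOperand(model, &tensorType); // operand 0: input0
    ANeuralNetworksModel_addOperand(model, &tensorType); // operand 1: input1
    ANeuralNetworksModel_addOperand(model, &tensorType); // operand 2: output

    uint32_t inputs[2]  = {0, 1};
    uint32_t outputs[1] = {2};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_MAXIMUM,
                                      2, inputs, 1, outputs);
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, inputs, 1, outputs);
    ANeuralNetworksModel_finish(model);
    return model;
}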