From a6bc52f6b9eeddcdebf4e660b21a4409a592ac4e Mon Sep 17 00:00:00 2001
From: Keith Davis
Date: Wed, 26 Jun 2019 09:39:49 +0100
Subject: IVGCVSW-3197 Support SPACE_TO_DEPTH on Android

 * Added support within 1.0/HalPolicy
 * Added support within 1.1/HalPolicy
 * Added support within 1.2/HalPolicy
 * Updated NnapiSupport.txt

Signed-off-by: Keith Davis
Change-Id: I2dc2743ee2f858d2b3496aae0f3e3e28a96ee96e
---
 1.0/HalPolicy.cpp | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1.0/HalPolicy.hpp |  2 ++
 1.1/HalPolicy.cpp |  1 +
 1.2/HalPolicy.cpp | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1.2/HalPolicy.hpp |  2 ++
 NnapiSupport.txt  |  2 +-
 6 files changed, 111 insertions(+), 1 deletion(-)

diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index a1a1ed9e..13c93277 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -58,6 +58,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertReLu6(operation, model, data);
         case V1_0::OperationType::SOFTMAX:
             return ConvertSoftmax(operation, model, data);
+        case V1_0::OperationType::SPACE_TO_DEPTH:
+            return ConvertSpaceToDepth(operation, model, data);
         case V1_0::OperationType::TANH:
             return ConvertTanH(operation, model, data);
         case V1_0::OperationType::RESHAPE:
@@ -1130,6 +1132,57 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    armnn::SpaceToDepthDescriptor desc;
+    bool dataLayoutCheck;
+
+    GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
+
+    if (desc.m_BlockSize <= 1)
+    {
+        return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsSpaceToDepthSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       desc))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
 bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
 {
     armnn::ActivationDescriptor desc;
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index 094c47c5..3c3401db 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -72,6 +72,8 @@ private:
     static bool ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ValidateConv2dParameters(const Operation& operation);
 
     static bool ValidateDepthwiseConv2dParameters(const Operation& operation);
 
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 0f00910f..78f157dd 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -29,6 +29,7 @@ static std::vector opsEquivalentInV10({
     V1_0::OperationType::RELU1,
     V1_0::OperationType::RELU6,
     V1_0::OperationType::SOFTMAX,
+    V1_0::OperationType::SPACE_TO_DEPTH,
     V1_0::OperationType::TANH,
     V1_0::OperationType::RESHAPE,
     V1_0::OperationType::RESIZE_BILINEAR,
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 836977da..fe0cfbdc 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -669,5 +669,57 @@ bool HalPolicy::ConvertResize(const Operation& operation,
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    armnn::SpaceToDepthDescriptor desc;
+
+    GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
+
+    if (desc.m_BlockSize <= 1)
+    {
+        return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
+    }
+
+    desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsSpaceToDepthSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       desc))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
 } // namespace hal_1_2
 } // namespace armnn_driver
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 9d6dd497..25aee820 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -43,6 +43,8 @@ private:
                               const Model& model,
                               ConversionData& data,
                               armnn::ResizeMethod resizeMethod);
+
+    static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 };
 
 } // namespace hal_1_2
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 79626614..462b48e6 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -39,6 +39,7 @@ RESHAPE (FLOAT32,QUANT8_ASYMM)
 RESIZE_BILINEAR (FLOAT32,QUANT8_ASYMM)
 SOFTMAX (FLOAT32,QUANT8_ASYMM)
 SPACE_TO_BATCH_ND (FLOAT32,QUANT8_ASYMM)
+SPACE_TO_DEPTH (FLOAT32,QUANT8_ASYMM)
 SQUEEZE (FLOAT32,QUANT8_ASYMM)
 STRIDED_SLICE (FLOAT32,QUANT8_ASYMM)
 SUB (FLOAT32,QUANT8_ASYMM)
@@ -61,7 +62,6 @@ EMBEDDING_LOOKUP
 HASHTABLE_LOOKUP
 LSH_PROJECTION
 RNN
-SPACE_TO_DEPTH
 SVDF
 
 The following AndroidNN HAL 1.2 operations are currently not supported:
--
cgit v1.2.1