From d74c50550f797e9a5df0e379b5b49d9bd3b29bbd Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Tue, 30 Jul 2019 17:06:00 +0100
Subject: IVGCVSW-3586 Fix Skipped Space_To_Batch Hal 1.2 VTS Failures

Signed-off-by: Finn Williams
Change-Id: I9d0d0a75d54c25075d8d87c6265e350486157f5c
---
 1.1/HalPolicy.cpp   |  89 +---------------------------------------------
 1.2/HalPolicy.cpp   |   9 ++++-
 1.2/HalPolicy.hpp   |   2 ++
 ConversionUtils.hpp | 100 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 111 insertions(+), 89 deletions(-)

diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 2fab4745..e75b5c2a 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -252,94 +252,7 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
 bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertSpaceToBatchNd()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-    unsigned int spatialDim = rank - 2;
-
-    if (rank != 4)
-    {
-        Fail("%s: Only inputs with rank 4 are supported", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-    const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
-
-    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
-    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
-    {
-        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
-    }
-
-    std::vector<int32_t> blockShape;
-    GetTensorInt32Values<hal_1_1::HalPolicy>(*blockShapeOperand, blockShape, model, data);
-    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
-    {
-        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
-    }
-
-    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
-    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
-    {
-        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
-    }
-
-    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
-    std::vector<int32_t> paddings;
-    GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
-    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
-    {
-        int paddingBeforeInput = paddings[i];
-        int paddingAfterInput = paddings[i + 1];
-        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
-        {
-            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
-        }
-
-        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
-    }
-
-    armnn::SpaceToBatchNdDescriptor descriptor;
-    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
-    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
-    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsSpaceToBatchNdSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertSpaceToBatchNd<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 03a64460..af310c93 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -57,7 +57,6 @@ bool HandledByV1_1(V1_2::OperationType operationType)
     {
         case V1_1::OperationType::DIV:
         case V1_1::OperationType::MEAN:
-        case V1_1::OperationType::SPACE_TO_BATCH_ND:
         case V1_1::OperationType::SQUEEZE:
         case V1_1::OperationType::STRIDED_SLICE:
         case V1_1::OperationType::TRANSPOSE:
@@ -163,6 +162,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertTransposeConv2d(operation, model, data);
         case V1_2::OperationType::SOFTMAX:
             return ConvertSoftmax(operation, model, data);
+        case V1_2::OperationType::SPACE_TO_BATCH_ND :
+            return ConvertSpaceToBatchNd(operation, model, data);
         case V1_2::OperationType::SPACE_TO_DEPTH:
             return ConvertSpaceToDepth(operation, model, data);
         case V1_2::OperationType::SUB:
@@ -938,6 +939,12 @@ bool HalPolicy::ConvertResize(const Operation& operation,
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
+    return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 409f7b4f..8b8d501c 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -68,6 +68,8 @@ private:
 
     static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 2b2a8ce4..0349999d 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1885,4 +1885,104 @@ bool ConvertBatchToSpaceNd(const HalOperation& operation,
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalOperand   = typename HalPolicy::Operand,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    unsigned int spatialDim = rank - 2;
+
+    if (rank != 4)
+    {
+        Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
output 0", __func__); + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + const HalOperand* blockShapeOperand = GetInputOperand(operation, 1, model); + const HalOperand* paddingsOperand = GetInputOperand(operation, 2, model); + + armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand); + if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim) + { + return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim); + } + + std::vector blockShape; + GetTensorInt32Values(*blockShapeOperand, blockShape, model, data); + if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; })) + { + return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__); + } + + armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand); + if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim) + { + return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim); + } + + std::vector> paddingList; + std::vector paddings; + GetTensorInt32Values(*paddingsOperand, paddings, model, data); + for (unsigned int i = 0; i < paddings.size() - 1; i += 2) + { + int paddingBeforeInput = paddings[i]; + int paddingAfterInput = paddings[i + 1]; + if (paddingBeforeInput < 0 || paddingAfterInput < 0) + { + return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__); + } + + paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput); + } + + armnn::SpaceToBatchNdDescriptor descriptor; + descriptor.m_DataLayout = armnn::DataLayout::NHWC; + descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend()); + descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend()); + + if (Is12Operand(*output)) + { + descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data); + } + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSpaceToBatchNdSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + } // namespace armnn_driver -- cgit v1.2.1