From 23b87b33b28fc862ac99c8ae2cfc33b923b6490e Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Tue, 30 Jul 2019 11:44:05 +0100
Subject: IVGCVSW-3583 Fix Skipped Batch_To_Space Hal 1.2 Tests

Signed-off-by: Finn Williams
Change-Id: I3f2928ba86a9d306a7eb400db3a420e42cf3fa7e
---
 1.1/HalPolicy.cpp   | 72 +--------------------------------------------
 1.2/HalPolicy.cpp   |  9 +++++-
 1.2/HalPolicy.hpp   |  2 ++
 ConversionUtils.hpp | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 96 insertions(+), 72 deletions(-)

diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 93ee70e1..2fab4745 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -605,77 +605,7 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model,
 bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertBatchToSpaceNd()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-    if (!blockOperand)
-    {
-        return Fail("%s: Could not read input 1", __func__);
-    }
-
-    // Convert the block operand to int32
-    std::vector<int32_t> block;
-    if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*blockOperand, block, model, data))
-    {
-        return Fail("%s: Input 1 has invalid values", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-
-    unsigned int rank = inputInfo.GetNumDimensions();
-    if (rank != 4)
-    {
-        Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
-    }
-
-    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
-    {
-        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
-                    " greater than or equal to 1", __func__);
-    }
-
-    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
-    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
-    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
-
-    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
-    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsBatchToSpaceNdSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               batchToSpaceNdDesc);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+    return ::ConvertBatchToSpaceNd<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 } // namespace hal_1_1
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index fe571df2..4372c166 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -55,7 +55,6 @@ bool HandledByV1_1(V1_2::OperationType operationType)
     }
     switch (static_cast<V1_1::OperationType>(operationType))
     {
-        case V1_1::OperationType::BATCH_TO_SPACE_ND:
         case V1_1::OperationType::DIV:
         case V1_1::OperationType::MEAN:
         case V1_1::OperationType::SPACE_TO_BATCH_ND:
@@ -128,6 +127,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
     {
         case V1_2::OperationType::AVERAGE_POOL_2D:
             return ConvertAveragePool2d(operation, model, data);
+        case V1_2::OperationType::BATCH_TO_SPACE_ND:
+            return ConvertBatchToSpaceNd(operation, model, data);
         case V1_2::OperationType::CONV_2D:
             return ConvertConv2d(operation, model, data);
         case V1_2::OperationType::DEPTHWISE_CONV_2D:
@@ -182,6 +183,12 @@ bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& mo
     return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
 }
 
+bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
+    return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 5d6158a3..e4719e8b 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -33,6 +33,8 @@ public:
 private:
     static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index fc6d3653..946bc95d 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1800,5 +1800,90 @@ bool ConvertSub(const Operation& operation, const Model& model, ConversionData&
     return Fail("%s: ProcessActivation failed", __func__);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertBatchToSpaceNd(const HalOperation& operation,
+                           const HalModel& model,
+                           ConversionData& data)
+{
+    using HalOperand = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!blockOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+
+    // Convert the block operand to int32
+    std::vector<int32_t> block;
+    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 4)
+    {
+        Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
+    }
+
+    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
+    {
+        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
+                    " greater than or equal to 1", __func__);
+    }
+
+    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
+    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
+    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    if (Is12Operand(*output))
+    {
+        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
+    }
+    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
+    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsBatchToSpaceNdSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               batchToSpaceNdDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
 
 } // namespace armnn_driver
--
cgit v1.2.1