diff options
author | Finn Williams <Finn.Williams@arm.com> | 2019-07-30 11:44:05 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-07-30 13:47:32 +0000 |
commit | 23b87b33b28fc862ac99c8ae2cfc33b923b6490e (patch) | |
tree | 0bc9b63c79965d0e917191637103087740630666 /ConversionUtils.hpp | |
parent | a4a629ad3a29df52fb47592d435d41180367ec5f (diff) | |
download | android-nn-driver-23b87b33b28fc862ac99c8ae2cfc33b923b6490e.tar.gz |
IVGCVSW-3583 Fix Skipped Batch_To_Space Hal 1.2 Tests
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I3f2928ba86a9d306a7eb400db3a420e42cf3fa7e
Diffstat (limited to 'ConversionUtils.hpp')
-rw-r--r-- | ConversionUtils.hpp | 85 |
1 file changed, 85 insertions, 0 deletions
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index fc6d3653..946bc95d 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -1800,5 +1800,90 @@ bool ConvertSub(const Operation& operation, const Model& model, ConversionData& return Fail("%s: ProcessActivation failed", __func__); } +template<typename HalPolicy, + typename HalOperation = typename HalPolicy::Operation, + typename HalModel = typename HalPolicy::Model> +bool ConvertBatchToSpaceNd(const HalOperation& operation, + const HalModel& model, + ConversionData& data) +{ + using HalOperand = typename HalPolicy::Operand; + using HalOperandType = typename HalPolicy::OperandType; + + LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model); + if (!blockOperand) + { + return Fail("%s: Could not read input 1", __func__); + } + + // Convert the block operand to int32 + std::vector<int32_t> block; + if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data)) + { + return Fail("%s: Input 1 has invalid values", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + + unsigned int rank = inputInfo.GetNumDimensions(); + if (rank != 4) + { + Fail("%s: Only inputs with rank equal to 4 are supported", __func__); + } + + if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; })) + { + return Fail("%s: Block sizes for each spatial dimension of the input tensor must be" + " greater than or equal to 1", 
__func__); + } + + armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc; + batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend()); + batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC; + + if (Is12Operand(*output)) + { + batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data); + } + // Setting crops to 0,0 0,0 as it is not supported in Android NN API + batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}}; + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsBatchToSpaceNdSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + batchToSpaceNdDesc); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data); +} } // namespace armnn_driver |