From 81a68341fc041d8d4cdcbb4e930e3c01d52ef464 Mon Sep 17 00:00:00 2001
From: Nattapat Chaimanowong
Date: Mon, 5 Nov 2018 14:04:47 +0000
Subject: IVGCVSW-2094 Add converter method for SPACE_TO_BATCH_ND to V1.1
 section of the HalPolicy

Change-Id: Ibe9ccf814e437c238e474e1f6eac5b1a8db1c8df
---
 1.1/HalPolicy.cpp | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1.1/HalPolicy.hpp |  1 +
 2 files changed, 89 insertions(+)

diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index c647812d..9078b6f2 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -33,6 +33,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertMean(operation, model, data);
         case V1_1::OperationType::PAD:
             return ConvertPad(operation, model, data);
+        case V1_1::OperationType::SPACE_TO_BATCH_ND:
+            return ConvertSpaceToBatchNd(operation, model, data);
         case V1_1::OperationType::SQUEEZE:
             return ConvertSqueeze(operation, model, data);
         case V1_1::OperationType::TRANSPOSE:
@@ -278,6 +280,92 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    unsigned int spatialDim = rank - 2;
+
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    armnn::SpaceToBatchNdDescriptor descriptor;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    const Operand* blockShapeOperand = GetInputOperand(operation, 1, model);
+    const Operand* paddingsOperand = GetInputOperand(operation, 2, model);
+
+    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
+    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
+    {
+        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
+    }
+
+    std::vector<int32_t> blockShape;
+    GetTensorInt32Values(*blockShapeOperand, blockShape, model, data);
+    for (unsigned int i = 0; i < blockShape.size(); i++)
+    {
+        if (blockShape[i] < 1)
+        {
+            return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
+        }
+
+        descriptor.m_BlockShape.push_back((unsigned int) blockShape[i]);
+    }
+
+    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
+    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
+    {
+        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
+    }
+
+    std::vector<int32_t> paddings;
+    GetTensorInt32Values(*paddingsOperand, paddings, model, data);
+    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+    {
+        int paddingBeforeInput = paddings[i];
+        int paddingAfterInput = paddings[i + 1];
+        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
+        {
+            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
+        }
+
+        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + if (!IsLayerSupported(__func__, + armnn::IsSpaceToBatchNdSupported, + data.m_Compute, + inputInfo, + outputInfo, + descriptor)) + { + return false; + } + + armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data) { LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp index b08ac50e..c537a1bd 100644 --- a/1.1/HalPolicy.hpp +++ b/1.1/HalPolicy.hpp @@ -29,6 +29,7 @@ private: static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data); }; -- cgit v1.2.1