From f77cab57b3eca1425384d4d5bfe44d76fc7023b9 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 1 Jun 2023 16:15:13 +0100
Subject: IVGCVSW-7785 Extend support for 3D tensors BATCH_TO_SPACE and
 SPACE_TO_BATCH in CpuRef

* Both layers were assuming 4D tensors; now 3D is supported too.
* Remove some unnecessary includes
* Add Unit Tests

Signed-off-by: Teresa Charlin
Change-Id: I7bdd11e4936a27cd97ec65fd915e6ccaa1494cff
---
 .../reference/workloads/BatchToSpaceNd.cpp        | 108 ++++++++++++---------
 .../reference/workloads/BatchToSpaceNd.hpp        |  22 ++---
 .../workloads/RefBatchToSpaceNdWorkload.cpp       |   9 +-
 .../workloads/RefBatchToSpaceNdWorkload.hpp       |   6 +-
 .../workloads/RefSpaceToBatchNdWorkload.cpp       |  11 +--
 .../workloads/RefSpaceToBatchNdWorkload.hpp      |   6 +-
 .../reference/workloads/SpaceToBatchNd.cpp        |  65 ++++++++-----
 .../reference/workloads/SpaceToBatchNd.hpp        |   3 +-
 8 files changed, 129 insertions(+), 101 deletions(-)

diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index bf7de1b04c..ebe9d2cfd5 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -1,85 +1,105 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2020,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "BatchToSpaceNd.hpp"
 
-#include "RefWorkloadUtils.hpp"
-
-#include <...>
-
-#include <...>
+#include <armnnUtils/DataLayoutIndexed.hpp>
 
 using namespace armnnUtils;
 
 namespace armnn
 {
 
-inline unsigned int Offset(const TensorShape& shape, unsigned int batch, unsigned int height, unsigned int width,
-                           unsigned int channels, const DataLayoutIndexed& dataLayout)
+unsigned int Offset(const TensorShape& shape,
+                    unsigned int batch,
+                    unsigned int height,
+                    unsigned int width,
+                    unsigned int channels,
+                    const DataLayoutIndexed& dataLayout)
 {
-    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    // 3D Tensors
+    unsigned int channelDimension3D = dataLayout.GetDataLayout() == DataLayout::NCHW ? 1 : 2;
+    if (shape.GetNumDimensions() == 3)
     {
-        return ((batch * shape[dataLayout.GetHeightIndex()] + height) * shape[dataLayout.GetWidthIndex()] + width) *
-               shape[dataLayout.GetChannelsIndex()] + channels;
+        return (batch * shape[dataLayout.GetHeightIndex()] + height) * shape[channelDimension3D] + channels;
+    }
+    // 4D Tensors
+    else if (shape.GetNumDimensions() == 4)
+    {
+        if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+        {
+            return ((batch * shape[dataLayout.GetHeightIndex()] + height) *
+                     shape[dataLayout.GetWidthIndex()] + width) *
+                     shape[dataLayout.GetChannelsIndex()] + channels;
+        }
+        else
+        {
+            return ((batch * shape[dataLayout.GetChannelsIndex()] + channels) *
+                     shape[dataLayout.GetHeightIndex()] + height) *
+                     shape[dataLayout.GetWidthIndex()] + width;
+        }
     }
     else
     {
-        return ((batch * shape[dataLayout.GetChannelsIndex()] + channels) *
-               shape[dataLayout.GetHeightIndex()] + height) *
-               shape[dataLayout.GetWidthIndex()] + width;
+        throw InvalidArgumentException("Tensor rank must be either 3 or 4", CHECK_LOCATION());
     }
 }
 
-void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
-                    const TensorInfo& inputTensorInfo,
-                    const TensorInfo& outputTensorInfo,
-                    const std::vector<unsigned int>& blockShape,
-                    const std::vector<std::pair<unsigned int, unsigned int>>& cropsData,
-                    Decoder<float>& inputDecoder,
-                    Encoder<float>& outputEncoder)
+void BatchToSpaceNd(const TensorInfo& inputInfo,
+                    const TensorInfo& outputInfo,
+                    const BatchToSpaceNdDescriptor& params,
+                    Decoder<float>& inputData,
+                    Encoder<float>& outputData)
 {
-    TensorShape inputShape = inputTensorInfo.GetShape();
-
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
-
-    TensorShape outputShape = outputTensorInfo.GetShape();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 3 && rank != 4 )
+    {
+        throw InvalidArgumentException("Tensor rank must be either 3 or 4, but it is " + std::to_string(rank),
+                                       CHECK_LOCATION());
+    }
 
-    ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+    DataLayoutIndexed dataLayout = params.m_DataLayout;
+    unsigned int channelDimension3D = params.m_DataLayout == DataLayout::NCHW ? 1 : 2;
 
-    const unsigned int inputBatchSize = inputShape[0];
-    const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
+    TensorShape inputShape = inputInfo.GetShape();
+    TensorShape outputShape = outputInfo.GetShape();
 
+    const unsigned int inputBatchSize  = inputShape[0];
     const unsigned int outputBatchSize = outputShape[0];
-    const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
-    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
 
-    ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+    const unsigned int channels = (rank == 3) ? inputShape[channelDimension3D]
+                                              : inputShape[dataLayout.GetChannelsIndex()];
 
-    const unsigned int blockShapeHeight = blockShape[0];
-    const unsigned int blockShapeWidth = blockShape[1];
+    const unsigned int inputHeight  = inputShape[dataLayout.GetHeightIndex()];
+    const unsigned int inputWidth   = (rank == 3) ? 1 : inputShape[dataLayout.GetWidthIndex()];
+    const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth  = (rank == 3) ? 1 : outputShape[dataLayout.GetWidthIndex()];
 
-    ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+    const unsigned int blockHeight = params.m_BlockShape[0];
+    const unsigned int blockWidth  = (rank == 3) ? 1 : params.m_BlockShape[1];
 
-    const unsigned int cropsTop = cropsData[0].first;
-    const unsigned int cropsLeft = cropsData[1].first;
+    const unsigned int cropsTop  = params.m_Crops[0].first;
+    const unsigned int cropsLeft = (rank == 3) ? 0 : params.m_Crops[1].first;
 
     for (unsigned int inBatch = 0; inBatch < inputBatchSize; ++inBatch)
     {
         const unsigned int outBatch = inBatch % outputBatchSize;
         const unsigned int spatialOffset = inBatch / outputBatchSize;
 
-        for (unsigned int inH = 0; inH < inputTensorInfo.GetShape()[dataLayout.GetHeightIndex()]; ++inH) {
-            const unsigned int outH = inH * blockShapeHeight + spatialOffset / blockShapeWidth - cropsTop;
+        for (unsigned int inH = 0; inH < inputHeight; ++inH)
+        {
+            const unsigned int outH = inH * blockHeight + spatialOffset / blockWidth - cropsTop;
 
             if (outH >= outputHeight)
             {
                 continue;
             }
 
-            for (unsigned int inW = 0; inW < inputTensorInfo.GetShape()[dataLayout.GetWidthIndex()]; ++inW) {
-                const unsigned int outW = inW * blockShapeWidth + spatialOffset % blockShapeWidth - cropsLeft;
+            for (unsigned int inW = 0; inW < inputWidth; ++inW)
+            {
+                const unsigned int outW = inW * blockWidth + spatialOffset % blockWidth - cropsLeft;
 
                 if (outW >= outputWidth)
                 {
@@ -91,9 +111,9 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
                     unsigned int outOffset = Offset(outputShape, outBatch, outH, outW, c, dataLayout);
                     unsigned int inOffset = Offset(inputShape, inBatch, inH, inW, c, dataLayout);
 
-                    outputEncoder[outOffset];
-                    inputDecoder[inOffset];
-                    outputEncoder.Set(inputDecoder.Get());
+                    outputData[outOffset];
+                    inputData[inOffset];
+                    outputData.Set(inputData.Get());
                 }
             }
         }
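The rank-3 path above treats the tensor as having no width axis: inputWidth,
outputWidth and blockWidth all collapse to 1, so only the height dimension is
expanded and only m_BlockShape[0] / m_Crops[0] are read. The following is a
minimal standalone sketch of that path (plain C++ with illustrative shapes and
values; armnn's Decoder/Encoder are replaced by flat float vectors), showing
how the rows of the input batches interleave into the output:

    // Rank-3 (NHWC, shape [N, H, C]) batch-to-space, mirroring the loop above.
    #include <cassert>
    #include <vector>

    int main()
    {
        const unsigned int inN = 2, inH = 2, channels = 1; // input shape [2, 2, 1]
        const unsigned int blockH = 2;                     // m_BlockShape = { 2 }
        const unsigned int cropsTop = 0;                   // m_Crops = { { 0, 0 } }
        const unsigned int outN = inN / blockH;            // output shape [1, 4, 1]
        const unsigned int outH = inH * blockH;

        std::vector<float> input  = { 1, 3,                // batch 0, rows h = 0, 1
                                      2, 4 };              // batch 1, rows h = 0, 1
        std::vector<float> output(outN * outH * channels, 0.0f);

        for (unsigned int b = 0; b < inN; ++b)
        {
            const unsigned int outB = b % outN;            // destination batch
            const unsigned int spatialOffset = b / outN;   // block row filled by this batch
            for (unsigned int h = 0; h < inH; ++h)
            {
                const unsigned int oh = h * blockH + spatialOffset - cropsTop;
                for (unsigned int c = 0; c < channels; ++c)
                {
                    // Offset() for rank 3 NHWC reduces to (batch * H + h) * C + c.
                    output[(outB * outH + oh) * channels + c] =
                        input[(b * inH + h) * channels + c];
                }
            }
        }

        // Rows of the two input batches interleave: { 1, 3 } and { 2, 4 } give { 1, 2, 3, 4 }.
        const std::vector<float> expected = { 1, 2, 3, 4 };
        assert(output == expected);
    }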
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index 0fcef58554..acacda4e86 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2019,2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -9,21 +9,15 @@
 #include "Decoders.hpp"
 #include "Encoders.hpp"
 
-#include <...>
-
-#include <...>
-
-#include <...>
-#include <...>
+#include <armnn/Descriptors.hpp>
 
 namespace armnn
 {
 
-void BatchToSpaceNd(const armnnUtils::DataLayoutIndexed& dataLayout,
-                    const TensorInfo& inputTensorInfo,
-                    const TensorInfo& outputTensorInfo,
-                    const std::vector<unsigned int>& blockShape,
-                    const std::vector<std::pair<unsigned int, unsigned int>>& cropsData,
-                    Decoder<float>& inputDecoder,
-                    Encoder<float>& outputEncoder);
+void BatchToSpaceNd(const TensorInfo& inputInfo,
+                    const TensorInfo& outputInfo,
+                    const BatchToSpaceNdDescriptor& params,
+                    Decoder<float>& inputData,
+                    Encoder<float>& outputData);
+
 } // namespace armnn

diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
index 72c7a7687e..6bb8aff72c 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
@@ -1,11 +1,11 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
-#include "BatchToSpaceNd.hpp"
-#include "Profiling.hpp"
 #include "RefBatchToSpaceNdWorkload.hpp"
+#include "BatchToSpaceNd.hpp"
+
 #include "RefWorkloadUtils.hpp"
 
 namespace armnn
@@ -32,8 +32,7 @@ void RefBatchToSpaceNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std:
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
 
-    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
-                   m_Data.m_Parameters.m_Crops, *inputDecoder, *outputEncoder);
+    BatchToSpaceNd(inputInfo, outputInfo, m_Data.m_Parameters, *inputDecoder, *outputEncoder);
 }
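Passing the whole BatchToSpaceNdDescriptor instead of its individual fields lets
the kernel pick the rank-3 or rank-4 path on its own. For the rank-3 case the
descriptor carries a single spatial entry; a sketch of how a caller might fill
one in (the field names are those declared in armnn's Descriptors.hpp, but the
helper itself is hypothetical):

    #include <armnn/Descriptors.hpp>

    armnn::BatchToSpaceNdDescriptor MakeRank3Descriptor()
    {
        armnn::BatchToSpaceNdDescriptor desc;
        desc.m_BlockShape = { 2 };        // one block dimension: height only
        desc.m_Crops      = { { 0, 0 } }; // one top/bottom pair; no width entry
        desc.m_DataLayout = armnn::DataLayout::NHWC;
        return desc;
    }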
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
index ac6aad3eb2..5fb5835b68 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
@@ -1,14 +1,14 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include "RefBaseWorkload.hpp"
-#include <...>
 
-namespace armnn {
+namespace armnn
+{
 
 class RefBatchToSpaceNdWorkload : public RefBaseWorkload<BatchToSpaceNdQueueDescriptor>
 {

diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
index 6aa422afdc..d29c2c801e 100644
--- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -7,7 +7,6 @@
 #include "SpaceToBatchNd.hpp"
 
 #include "RefWorkloadUtils.hpp"
-#include <...>
 
 namespace armnn
 {
@@ -28,12 +27,12 @@ void RefSpaceToBatchNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std:
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToBatchNdWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
-    std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
-
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
-    std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
 
-    SpaceToBatchNd(inputInfo, outputInfo, m_Data.m_Parameters, *decoder, *encoder);
+    std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
+    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
+
+    SpaceToBatchNd(inputInfo, outputInfo, m_Data.m_Parameters, *inputDecoder, *outputEncoder);
 }
 
 } //namespace armnn
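The Execute body follows the usual CpuRef pattern: wrap the raw, possibly
quantized, buffers in a float Decoder/Encoder, then hand both to the
layout-aware kernel. That contract is why the kernels end in the
"outputData[outOffset]; inputData[inOffset]; outputData.Set(inputData.Get());"
idiom: operator[] positions the view, and Get/Set convert to and from float. A
simplified stand-in for that interface (not the armnn classes) illustrating the
contract:

    #include <cstddef>

    struct FloatDecoder    // minimal stand-in for armnn::Decoder<float>
    {
        const float* data;
        std::size_t pos = 0;
        FloatDecoder& operator[](std::size_t index) { pos = index; return *this; }
        float Get() const { return data[pos]; }
    };

    struct FloatEncoder    // minimal stand-in for armnn::Encoder<float>
    {
        float* data;
        std::size_t pos = 0;
        FloatEncoder& operator[](std::size_t index) { pos = index; return *this; }
        void Set(float v) { data[pos] = v; }
    };

    // The kernel sees only operator[]/Get/Set, so the same loop body can serve
    // float, quantized and other element types alike.
    void CopyElement(FloatDecoder& in, FloatEncoder& out, std::size_t inOff, std::size_t outOff)
    {
        out[outOff];
        in[inOff];
        out.Set(in.Get());
    }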
diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp
index f2c87682db..f9d75ee4d6 100644
--- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp
+++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp
@@ -1,13 +1,11 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include "RefBaseWorkload.hpp"
-#include <...>
-
 namespace armnn
 {
 
@@ -15,8 +13,10 @@ class RefSpaceToBatchNdWorkload : public RefBaseWorkload<SpaceToBatchNdQueueDescriptor>
 {
 public:
     using RefBaseWorkload<SpaceToBatchNdQueueDescriptor>::RefBaseWorkload;
+
     void Execute() const override;
     void ExecuteAsync(ExecutionData& executionData) override;
+
 private:
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
 };

diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index b6bab17367..c3f022c6a6 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2019,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -19,15 +19,29 @@ unsigned int GetOffset(const TensorShape& shape,
                        unsigned int c,
                        const DataLayoutIndexed& dataLayout)
 {
-    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    // 3D Tensors
+    unsigned int channelDimension3D = dataLayout.GetDataLayout() == DataLayout::NCHW ? 1 : 2;
+    if (shape.GetNumDimensions() == 3)
     {
-        return ((b * shape[dataLayout.GetHeightIndex()] + h) * shape[dataLayout.GetWidthIndex()] + w) *
-               shape[dataLayout.GetChannelsIndex()] + c;
+        return (b * shape[dataLayout.GetHeightIndex()] + h) * shape[channelDimension3D] + c;
+    }
+    // 4D Tensors
+    else if (shape.GetNumDimensions() == 4)
+    {
+        if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+        {
+            return ((b * shape[dataLayout.GetHeightIndex()] + h) * shape[dataLayout.GetWidthIndex()] + w) *
+                   shape[dataLayout.GetChannelsIndex()] + c;
+        }
+        else
+        {
+            return ((b * shape[dataLayout.GetChannelsIndex()] + c) * shape[dataLayout.GetHeightIndex()] + h) *
+                   shape[dataLayout.GetWidthIndex()] + w;
+        }
     }
     else
     {
-        return ((b * shape[dataLayout.GetChannelsIndex()] + c) * shape[dataLayout.GetHeightIndex()] + h) *
-               shape[dataLayout.GetWidthIndex()] + w;
+        throw InvalidArgumentException("Tensor rank must be either 3 or 4", CHECK_LOCATION());
     }
 }
 
@@ -37,37 +51,46 @@ void SpaceToBatchNd(const TensorInfo& inputInfo,
                     Decoder<float>& inputData,
                     Encoder<float>& outputData)
 {
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 3 && rank != 4 )
+    {
+        throw InvalidArgumentException("Tensor rank must be either 3 or 4, but it is " + std::to_string(rank),
+                                       CHECK_LOCATION());
+    }
+
     DataLayoutIndexed dataLayout = params.m_DataLayout;
+    unsigned int channelDimension3D = params.m_DataLayout == DataLayout::NCHW ? 1 : 2;
 
     const TensorShape& inputShape = inputInfo.GetShape();
     const TensorShape& outputShape = outputInfo.GetShape();
 
-    const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
+    const unsigned int inputBatchSize  = inputShape[0];
+    const unsigned int outputBatchSize = outputShape[0];
 
-    const unsigned int inputBatchSize = inputShape[0];
-    const unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
-    const unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
+    const unsigned int channels = (rank == 3) ? inputShape[channelDimension3D]
+                                              : inputShape[dataLayout.GetChannelsIndex()];
 
-    const unsigned int outputBatchSize = outputShape[0];
+    const unsigned int inputHeight  = inputShape[dataLayout.GetHeightIndex()];
+    const unsigned int inputWidth   = (rank == 3) ? 1 : inputShape[dataLayout.GetWidthIndex()];
     const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
-    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
+    const unsigned int outputWidth  = (rank == 3) ? 1 : outputShape[dataLayout.GetWidthIndex()];
 
     const unsigned int blockHeight = params.m_BlockShape[0];
-    const unsigned int blockWidth = params.m_BlockShape[1];
+    const unsigned int blockWidth  = (rank == 3) ? 1 : params.m_BlockShape[1];
 
-    const unsigned int paddingTop = params.m_PadList[0].first;
-    const unsigned int paddingLeft = params.m_PadList[1].first;
+    const unsigned int paddingTop  = params.m_PadList[0].first;
+    const unsigned int paddingLeft = (rank == 3) ? 0 : params.m_PadList[1].first;
 
-    for (unsigned int outB = 0; outB < outputBatchSize; outB++)
+    for (unsigned int outB = 0; outB < outputBatchSize; ++outB)
     {
         unsigned int inB = outB % inputBatchSize;
 
         unsigned int shiftW = (outB / inputBatchSize) % blockWidth;
         unsigned int shiftH = (outB / inputBatchSize) / blockWidth;
 
-        for (unsigned int outH = 0; outH < outputHeight; outH++)
+        for (unsigned int outH = 0; outH < outputHeight; ++outH)
         {
-            for (unsigned int outW = 0; outW < outputWidth; outW++)
+            for (unsigned int outW = 0; outW < outputWidth; ++outW)
             {
                 if (outH * blockHeight + shiftH < paddingTop ||
                     outH * blockHeight + shiftH >= paddingTop + inputHeight ||
@@ -117,10 +140,4 @@ void SpaceToBatchNd(const TensorInfo& inputInfo,
     }
 }
 
-void SpaceToBatchNd(const TensorInfo& inputInfo,
-                    const TensorInfo& outputInfo,
-                    const SpaceToBatchNdDescriptor& params,
-                    Decoder<float>& inputData,
-                    Encoder<float>& outData);
-
 } //namespace armnn
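For rank 3 the kernel above likewise collapses the width axis (blockWidth = 1,
paddingLeft = 0), so output row outH of batched output batch outB reads input
row outH * blockHeight + shiftH, or stays zero when that position falls in the
padding. A standalone sketch of this path, the inverse of the batch-to-space
example earlier (illustrative shapes and values only):

    // Rank-3 (NHWC, shape [N, H, C]) space-to-batch, mirroring the loop above.
    #include <cassert>
    #include <vector>

    int main()
    {
        const unsigned int inN = 1, inH = 4, channels = 1; // input shape [1, 4, 1]
        const unsigned int blockH = 2;                     // m_BlockShape = { 2 }
        const unsigned int padTop = 0;                     // m_PadList = { { 0, 0 } }
        const unsigned int outN = inN * blockH;            // output shape [2, 2, 1]
        const unsigned int outH = inH / blockH;

        std::vector<float> input  = { 1, 2, 3, 4 };
        std::vector<float> output(outN * outH * channels, 0.0f);

        for (unsigned int outB = 0; outB < outN; ++outB)
        {
            const unsigned int inB    = outB % inN;
            const unsigned int shiftH = outB / inN;        // blockWidth is 1 for rank 3
            for (unsigned int oh = 0; oh < outH; ++oh)
            {
                const unsigned int pos = oh * blockH + shiftH;
                if (pos < padTop || pos >= padTop + inH)
                {
                    continue;                              // padding region: stays zero
                }
                const unsigned int ih = pos - padTop;
                for (unsigned int c = 0; c < channels; ++c)
                {
                    output[(outB * outH + oh) * channels + c] =
                        input[(inB * inH + ih) * channels + c];
                }
            }
        }

        // Even rows land in batch 0, odd rows in batch 1: { 1, 3 } and { 2, 4 }.
        const std::vector<float> expected = { 1, 3, 2, 4 };
        assert(output == expected);
    }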
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.hpp b/src/backends/reference/workloads/SpaceToBatchNd.hpp
index 57c9b6bc25..7de34ee59a 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.hpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2019,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -10,7 +10,6 @@
 #include "Encoders.hpp"
 
 #include <armnn/Descriptors.hpp>
-#include "armnn/Tensor.hpp"
 
 namespace armnn
 {
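Both kernels now infer the rank from the input tensor, so the descriptor must
agree with it: one block/pad/crop entry for rank 3, two for rank 4. The patch
enforces the rank itself; a caller-side check in the same spirit could look
like this (a hypothetical helper, not part of the patch):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Exceptions.hpp>
    #include <cstddef>
    #include <string>

    void CheckSpaceToBatchDescriptor(const armnn::SpaceToBatchNdDescriptor& desc,
                                     unsigned int rank)
    {
        // Batch and channel axes are not spatial: rank 3 needs 1 entry, rank 4 needs 2.
        const std::size_t spatialDims = rank - 2;
        if (desc.m_BlockShape.size() != spatialDims || desc.m_PadList.size() != spatialDims)
        {
            throw armnn::InvalidArgumentException(
                "Expected " + std::to_string(spatialDims) + " block/pad entries for rank " +
                std::to_string(rank) + " input");
        }
    }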