author | Éanna Ó Catháin <eanna.ocathain@arm.com> | 2018-11-12 11:36:34 +0000
committer | Les Bell <les.bell@arm.com> | 2018-11-12 13:08:37 +0000
commit | 4e1e136cce3fca73ba49b570cfcb620f4ec574da (patch)
tree | 1fe9fcbb6a9dbafc12aa99ac543bc0da636a1cd1 /src/backends/reference
parent | f97debb95cbc7e0bbc60e66e5463ede517cac61b (diff)
download | armnn-4e1e136cce3fca73ba49b570cfcb620f4ec574da.tar.gz
IVGCVSW-2054: BATCH_TO_SPACE_ND Reference implementation and Unit tests.
Change-Id: I13c6728dbb60643d0e086d171225c5d802987f92
Diffstat (limited to 'src/backends/reference')
14 files changed, 272 insertions(+), 1 deletion(-)
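Background for reviewers: BATCH_TO_SPACE_ND is the inverse of SPACE_TO_BATCH_ND. It redistributes values from the batch dimension into interleaved spatial blocks and then trims the result by the crop amounts, so an NHWC input of shape [N, H, W, C] with block shape [bH, bW] and crops [[top, bottom], [left, right]] yields an output of shape [N / (bH * bW), H * bH - top - bottom, W * bW - left - right, C]. A minimal sketch of that shape arithmetic (editorial illustration, not part of the commit; the helper name is hypothetical):

#include <array>
#include <cassert>

// Hypothetical helper: NHWC output shape of BATCH_TO_SPACE_ND for a 2D block shape.
std::array<unsigned int, 4> BatchToSpaceNdOutputShape(const std::array<unsigned int, 4>& in, // [N, H, W, C]
                                                      unsigned int blockH, unsigned int blockW,
                                                      unsigned int cropTop, unsigned int cropBottom,
                                                      unsigned int cropLeft, unsigned int cropRight)
{
    assert(in[0] % (blockH * blockW) == 0); // the batch must split evenly into blocks
    return { in[0] / (blockH * blockW),
             in[1] * blockH - cropTop - cropBottom,
             in[2] * blockW - cropLeft - cropRight,
             in[3] };
}

// Example: in = [4, 1, 1, 1], block [2, 2], no crops  ->  [1, 2, 2, 1].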
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 0902b0fd17..b057370459 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -101,6 +101,22 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                      &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const BatchToSpaceNdDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    return (IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>) &&
+            IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                      output.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>));
+}
+
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index b161f5c7cf..2e86ecee29 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -31,6 +31,11 @@ public:
                                        const BatchNormalizationDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const BatchToSpaceNdDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index a238d5f545..afffd65285 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -270,5 +270,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescripto
     return MakeWorkload<RefPadFloat32Workload, RefPadUint8Workload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                    const WorkloadInfo& info) const
+{
+    return MakeWorkload<RefBatchToSpaceNdFloat32Workload, RefBatchToSpaceNdUint8Workload>(descriptor, info);
+}
 
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index e9b298d376..91bba84038 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -143,6 +143,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
+
+    virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const override;
 
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index cc8c24f394..7d56144f18 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -13,6 +13,7 @@ BACKEND_SOURCES := \
         RefWorkloadFactory.cpp \
         workloads/Activation.cpp \
         workloads/ArithmeticFunction.cpp \
+        workloads/BatchToSpaceNd.cpp \
         workloads/Broadcast.cpp \
         workloads/ConvImpl.cpp \
         workloads/FullyConnected.cpp \
@@ -25,6 +26,8 @@ BACKEND_SOURCES := \
         workloads/RefBaseConstantWorkload.cpp \
        workloads/RefBatchNormalizationFloat32Workload.cpp \
        workloads/RefBatchNormalizationUint8Workload.cpp \
+        workloads/RefBatchToSpaceNdFloat32Workload.cpp \
+        workloads/RefBatchToSpaceNdUint8Workload.cpp \
         workloads/RefConstantFloat32Workload.cpp \
         workloads/RefConstantUint8Workload.cpp \
         workloads/RefConvertFp16ToFp32Workload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index df0e37866d..703ec58208 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -376,4 +376,10 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNHWCUint8, SpaceToBatchNdMultiCh
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNHWCUint8, SpaceToBatchNdMultiBlockNHWCUint8Test)
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNHWCUint8, SpaceToBatchNdPaddingNHWCUint8Test)
 
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat321, BatchToSpaceNdNhwcFloat32Test1)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat322, BatchToSpaceNdNhwcFloat32Test2)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat323, BatchToSpaceNdNhwcFloat32Test3)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwFloat32Test1)
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
new file mode 100644
index 0000000000..bedf8418ef
--- /dev/null
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -0,0 +1,100 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+
+#include "RefWorkloadUtils.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <boost/assert.hpp>
+
+namespace armnn
+{
+
+inline unsigned int Offset(const TensorShape& shape, unsigned int batch, unsigned int height, unsigned int width,
+                           unsigned int channels, const DataLayoutIndexed& dataLayout)
+{
+    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    {
+        return ((batch * shape[dataLayout.GetHeightIndex()] + height) * shape[dataLayout.GetWidthIndex()] + width) *
+               shape[dataLayout.GetChannelsIndex()] + channels;
+    }
+    else
+    {
+        return ((batch * shape[dataLayout.GetChannelsIndex()] + channels) *
+               shape[dataLayout.GetHeightIndex()] + height) *
+               shape[dataLayout.GetWidthIndex()] + width;
+    }
+}
+
+void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+                    const TensorInfo& inputTensorInfo,
+                    const TensorInfo& outputTensorInfo,
+                    const std::vector<unsigned int>& blockShape,
+                    const std::vector<std::vector<unsigned int>>& cropsData,
+                    const float* inputData,
+                    float* outputData)
+{
+    TensorShape inputShape = inputTensorInfo.GetShape();
+    unsigned int inputNumDims = inputShape.GetNumDimensions();
+    if (inputNumDims != 4)
+    {
+        throw armnn::InvalidArgumentException("Expected Input with 4 Dimensions");
+    }
+
+    TensorShape outputShape = outputTensorInfo.GetShape();
+    unsigned int outputNumDims = outputShape.GetNumDimensions();
+    if (outputNumDims != 4)
+    {
+        throw armnn::InvalidArgumentException("Expected Output with 4 Dimensions");
+    }
+
+    const unsigned int inputBatchSize = inputShape[0];
+    const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
+
+    const unsigned int outputBatchSize = outputShape[0];
+    const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
+
+    const unsigned int blockShapeHeight = blockShape[0];
+    const unsigned int blockShapeWidth = blockShape[1];
+
+    const unsigned int cropsTop = cropsData[0][0];
+    const unsigned int cropsLeft = cropsData[1][0];
+
+    for (unsigned int inBatch = 0; inBatch < inputBatchSize; ++inBatch)
+    {
+        const unsigned int outBatch = inBatch % outputBatchSize;
+        const unsigned int spatialOffset = inBatch / outputBatchSize;
+
+        for (unsigned int inH = 0; inH < inputTensorInfo.GetShape()[dataLayout.GetHeightIndex()]; ++inH)
+        {
+            const unsigned int outH = inH * blockShapeHeight + spatialOffset / blockShapeWidth - cropsTop;
+
+            if (outH >= outputHeight)
+            {
+                continue;
+            }
+
+            for (unsigned int inW = 0; inW < inputTensorInfo.GetShape()[dataLayout.GetWidthIndex()]; ++inW)
+            {
+                const unsigned int outW = inW * blockShapeWidth + spatialOffset % blockShapeWidth - cropsLeft;
+
+                if (outW >= outputWidth)
+                {
+                    continue;
+                }
+
+                for (unsigned int c = 0; c < channels; c++)
+                {
+                    unsigned int outOffset = Offset(outputShape, outBatch, outH, outW, c, dataLayout);
+                    unsigned int inOffset = Offset(inputShape, inBatch, inH, inW, c, dataLayout);
+                    outputData[outOffset] = inputData[inOffset];
+                }
+            }
+        }
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
new file mode 100644
index 0000000000..7923ceadd0
--- /dev/null
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+                    const TensorInfo& inputTensorInfo,
+                    const TensorInfo& outputTensorInfo,
+                    const std::vector<unsigned int>& blockShape,
+                    const std::vector<std::vector<unsigned int>>& cropsData,
+                    const float* inputData,
+                    float* outputData);
+
+} // namespace armnn
\ No newline at end of file
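To make the index mapping in BatchToSpaceNd.cpp concrete, here is a self-contained sketch (editorial, not part of the commit) that applies the same outBatch/spatialOffset arithmetic to a 4x1x1x1 NHWC input with block shape [2, 2] and no crops; each input batch lands at a distinct spatial position of the single output batch:

#include <cstdio>
#include <vector>

int main()
{
    const unsigned int inputBatchSize = 4, blockH = 2, blockW = 2;
    const unsigned int outputBatchSize = inputBatchSize / (blockH * blockW); // 1
    const unsigned int outputHeight = 2, outputWidth = 2;                    // 1*blockH, 1*blockW, no crops
    std::vector<float> input = { 1.0f, 2.0f, 3.0f, 4.0f }; // one value per input batch (H = W = C = 1)
    std::vector<float> output(outputBatchSize * outputHeight * outputWidth);

    for (unsigned int inBatch = 0; inBatch < inputBatchSize; ++inBatch)
    {
        const unsigned int outBatch = inBatch % outputBatchSize;      // always 0 here
        const unsigned int spatialOffset = inBatch / outputBatchSize; // position inside the 2x2 block
        const unsigned int outH = spatialOffset / blockW;             // the kernel's mapping with inH = 0
        const unsigned int outW = spatialOffset % blockW;             // the kernel's mapping with inW = 0
        output[(outBatch * outputHeight + outH) * outputWidth + outW] = input[inBatch];
    }

    for (float v : output) { std::printf("%g ", v); } // prints: 1 2 3 4
    return 0;
}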
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 4cef2d0771..1c38509ca0 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -9,6 +9,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     ArithmeticFunction.cpp
     ArithmeticFunction.hpp
     BatchNormImpl.hpp
+    BatchToSpaceNd.cpp
+    BatchToSpaceNd.hpp
     Broadcast.cpp
     Broadcast.hpp
     ConvImpl.cpp
@@ -32,6 +34,10 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefBatchNormalizationFloat32Workload.hpp
     RefBatchNormalizationUint8Workload.cpp
     RefBatchNormalizationUint8Workload.hpp
+    RefBatchToSpaceNdFloat32Workload.cpp
+    RefBatchToSpaceNdFloat32Workload.hpp
+    RefBatchToSpaceNdUint8Workload.cpp
+    RefBatchToSpaceNdUint8Workload.hpp
     RefConstantFloat32Workload.cpp
     RefConstantFloat32Workload.hpp
     RefConstantUint8Workload.cpp
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
new file mode 100644
index 0000000000..bf246c272f
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefBatchToSpaceNdFloat32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdFloat32Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    const float* inputData = GetInputTensorDataFloat(0, m_Data);
+    float* outputData = GetOutputTensorDataFloat(0, m_Data);
+
+    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+                   m_Data.m_Parameters.m_Crops, inputData, outputData);
+}
+
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
new file mode 100644
index 0000000000..4977772c82
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefBatchToSpaceNdFloat32Workload : public Float32Workload<BatchToSpaceNdQueueDescriptor>
+{
+
+public:
+    using Float32Workload<BatchToSpaceNdQueueDescriptor>::Float32Workload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
new file mode 100644
index 0000000000..a66bcd42de
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefBatchToSpaceNdUint8Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdUint8Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    auto dequantizedInputData = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
+
+    // Run the float reference kernel into a scratch buffer, then requantize into the uint8 output.
+    std::vector<float> results(outputInfo.GetNumElements());
+    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+                   m_Data.m_Parameters.m_Crops, dequantizedInputData.data(), results.data());
+
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn
\ No newline at end of file
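The uint8 workload above uses the reference backend's usual pattern: dequantize the input to float, run the float kernel into a scratch buffer, then quantize the result back into the uint8 output. As a rough per-value sketch of what that round trip does for asymmetric uint8 tensors (editorial illustration; the real Dequantize/Quantize helpers in RefWorkloadUtils.hpp operate on whole tensors using the TensorInfo's scale and offset):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Affine (scale/offset) dequantization: real = scale * (q - offset).
float DequantizeValue(uint8_t q, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(q) - offset);
}

// The inverse, rounding to nearest and clamping to the uint8 range.
uint8_t QuantizeValue(float v, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::round(v / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}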
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
new file mode 100644
index 0000000000..1f221c2f47
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefBatchToSpaceNdUint8Workload : public Uint8Workload<BatchToSpaceNdQueueDescriptor>
+{
+
+public:
+    using Uint8Workload<BatchToSpaceNdQueueDescriptor>::Uint8Workload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 03907a6b91..5ea7fe4b58 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -54,4 +54,6 @@
 #include "RefConvertFp32ToFp16Workload.hpp"
 #include "RefMeanUint8Workload.hpp"
 #include "RefMeanFloat32Workload.hpp"
-#include "RefPadWorkload.hpp"
\ No newline at end of file
+#include "RefPadWorkload.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"
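For completeness, a hedged sketch of how a caller might exercise the IsBatchToSpaceNdSupported check added by this commit (editorial illustration; the wrapper function and the include path are assumptions, since RefLayerSupport.hpp is internal to src/backends/reference):

#include <string>

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include "RefLayerSupport.hpp" // path assumed

// Hypothetical helper: asks the reference backend whether it can run the layer.
bool CanRunBatchToSpaceNdOnCpuRef(const armnn::TensorInfo& input,
                                  const armnn::TensorInfo& output,
                                  const armnn::BatchToSpaceNdDescriptor& descriptor)
{
    armnn::RefLayerSupport layerSupport;
    std::string reason;
    const bool supported = layerSupport.IsBatchToSpaceNdSupported(
        input, output, descriptor, armnn::Optional<std::string&>(reason));
    // On rejection, 'reason' explains why (here: an unsupported data type).
    return supported;
}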