From 4e1e136cce3fca73ba49b570cfcb620f4ec574da Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89anna=20=C3=93=20Cath=C3=A1in?=
Date: Mon, 12 Nov 2018 11:36:34 +0000
Subject: IVGCVSW-2054: BATCH_TO_SPACE_ND Reference implementation and Unit tests.

Change-Id: I13c6728dbb60643d0e086d171225c5d802987f92
---
 Android.mk                                         |   1 +
 CMakeLists.txt                                     |   2 +
 include/armnn/Descriptors.hpp                      |  19 +++
 include/armnn/DescriptorsFwd.hpp                   |   1 +
 include/armnn/ILayerSupport.hpp                    |   5 +
 include/armnn/INetwork.hpp                         |   7 +
 include/armnn/LayerSupport.hpp                     |   8 +
 src/armnn/InternalTypes.cpp                        |   1 +
 src/armnn/InternalTypes.hpp                        |   1 +
 src/armnn/LayerSupport.cpp                         |  15 ++
 src/armnn/LayersFwd.hpp                            |   2 +
 src/armnn/Network.cpp                              |   6 +
 src/armnn/Network.hpp                              |   3 +
 src/armnn/layers/BatchToSpaceNdLayer.cpp           |  89 +++++++++++
 src/armnn/layers/BatchToSpaceNdLayer.hpp           |  29 ++++
 src/backends/backendsCommon/ILayerSupport.cpp      |   8 +
 src/backends/backendsCommon/WorkloadData.cpp       |   8 +-
 src/backends/backendsCommon/WorkloadData.hpp       |   4 +
 src/backends/backendsCommon/WorkloadFactory.cpp    |  12 ++
 src/backends/backendsCommon/WorkloadFactory.hpp    |   3 +
 .../test/IsLayerSupportedTestImpl.hpp              |  16 ++
 src/backends/backendsCommon/test/LayerTests.cpp    | 167 +++++++++++++++++++++
 src/backends/backendsCommon/test/LayerTests.hpp    |   6 +
 src/backends/cl/ClWorkloadFactory.cpp              |  12 ++
 src/backends/cl/ClWorkloadFactory.hpp              |   3 +
 src/backends/neon/NeonWorkloadFactory.cpp          |  12 ++
 src/backends/neon/NeonWorkloadFactory.hpp          |   3 +
 src/backends/reference/RefLayerSupport.cpp         |  16 ++
 src/backends/reference/RefLayerSupport.hpp         |   5 +
 src/backends/reference/RefWorkloadFactory.cpp      |   5 +
 src/backends/reference/RefWorkloadFactory.hpp      |   3 +
 src/backends/reference/backend.mk                  |   3 +
 src/backends/reference/test/RefLayerTests.cpp      |   6 +
 .../reference/workloads/BatchToSpaceNd.cpp         | 100 ++++++++++++
 .../reference/workloads/BatchToSpaceNd.hpp         |  22 +++
 src/backends/reference/workloads/CMakeLists.txt    |   6 +
 .../workloads/RefBatchToSpaceNdFloat32Workload.cpp |  28 ++++
 .../workloads/RefBatchToSpaceNdFloat32Workload.hpp |  22 +++
 .../workloads/RefBatchToSpaceNdUint8Workload.cpp   |  30 ++++
 .../workloads/RefBatchToSpaceNdUint8Workload.hpp   |  23 +++
 src/backends/reference/workloads/RefWorkloads.hpp  |   4 +-
 41 files changed, 714 insertions(+), 2 deletions(-)
 create mode 100644 src/armnn/layers/BatchToSpaceNdLayer.cpp
 create mode 100644 src/armnn/layers/BatchToSpaceNdLayer.hpp
 create mode 100644 src/backends/reference/workloads/BatchToSpaceNd.cpp
 create mode 100644 src/backends/reference/workloads/BatchToSpaceNd.hpp
 create mode 100644 src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
 create mode 100644 src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
 create mode 100644 src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
 create mode 100644 src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp

diff --git a/Android.mk b/Android.mk
index ee24312156..4742aed6d1 100644
--- a/Android.mk
+++ b/Android.mk
@@ -80,6 +80,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/AdditionLayer.cpp \
         src/armnn/layers/ArithmeticBaseLayer.cpp \
         src/armnn/layers/BatchNormalizationLayer.cpp \
+        src/armnn/layers/BatchToSpaceNdLayer.cpp \
         src/armnn/layers/ConstantLayer.cpp \
         src/armnn/layers/Convolution2dLayer.cpp \
         src/armnn/layers/ConvertFp16ToFp32Layer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 48176c106b..95ca39fb1b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -175,6 +175,8 @@ list(APPEND armnn_sources
     src/armnn/layers/ArithmeticBaseLayer.cpp
    src/armnn/layers/BatchNormalizationLayer.hpp
    src/armnn/layers/BatchNormalizationLayer.cpp
+   src/armnn/layers/BatchToSpaceNdLayer.hpp
+   src/armnn/layers/BatchToSpaceNdLayer.cpp
    src/armnn/layers/ConstantLayer.hpp
    src/armnn/layers/ConstantLayer.cpp
    src/armnn/layers/Convolution2dLayer.hpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 656afb1756..bda8cf7396 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -296,6 +296,25 @@ struct BatchNormalizationDescriptor
     DataLayoutIndexed m_DataLayout;
 };
 
+struct BatchToSpaceNdDescriptor
+{
+    BatchToSpaceNdDescriptor()
+        : m_BlockShape()
+        , m_Crops()
+        , m_DataLayout(DataLayout::NCHW)
+    {}
+
+    BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
+                             std::vector<std::vector<unsigned int>> crops)
+        : m_BlockShape(blockShape)
+        , m_Crops(crops)
+        , m_DataLayout(DataLayout::NCHW)
+    {}
+
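+    /// m_BlockShape is the block size moved from the batch into each spatial dimension
+    /// ({height, width} for 4D tensors); m_Crops holds the {begin, end} amounts cropped
+    /// from each spatial dimension afterwards; m_DataLayout selects NCHW or NHWC.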
+    std::vector<unsigned int> m_BlockShape;
+    std::vector<std::vector<unsigned int>> m_Crops;
+    DataLayoutIndexed m_DataLayout;
+};
+
 struct FakeQuantizationDescriptor
 {
     FakeQuantizationDescriptor()
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index a2974d797d..0e96c360d3 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -8,6 +8,7 @@ namespace armnn
 {
 struct ActivationDescriptor;
 struct BatchNormalizationDescriptor;
+struct BatchToSpaceNdDescriptor;
 struct Convolution2dDescriptor;
 struct DepthwiseConvolution2dDescriptor;
 struct FakeQuantizationDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 0e7adff0af..1bf268f409 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -41,6 +41,11 @@ public:
                                               const BatchNormalizationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
+    virtual bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const BatchToSpaceNdDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
     virtual bool IsConstantSupported(const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 2cb8f28d87..df274d6dc1 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -149,6 +149,13 @@ public:
     virtual IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                                const char* name = nullptr) = 0;
 
+    /// Adds a batch to space ND layer to the network.
+    /// @param batchToSpaceNdDescriptor - Description of the layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+                                                      const char* name = nullptr) = 0;
+
     /// Adds a pooling layer to the network.
     /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
     /// @param name - Optional name for the layer.
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index eb6b5da7b9..bd20f185da 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -40,6 +40,14 @@ bool IsBatchNormalizationSupported(const BackendId& backend,
                                    char* reasonIfUnsupported = nullptr,
                                    size_t reasonIfUnsupportedMaxLength = 1024);
 
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsBatchToSpaceNdSupported(const BackendId& backend,
+                               const TensorInfo& input,
+                               const TensorInfo& output,
+                               const BatchToSpaceNdDescriptor& descriptor,
+                               char* reasonIfUnsupported = nullptr,
+                               size_t reasonIfUnsupportedMaxLength = 1024);
+
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsConstantSupported(const BackendId& backend,
                          const TensorInfo& output,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 931b6a3579..3493a3d5a2 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -17,6 +17,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Activation: return "Activation";
         case LayerType::Addition: return "Addition";
         case LayerType::BatchNormalization: return "BatchNormalization";
+        case LayerType::BatchToSpaceNd: return "BatchToSpaceNd";
         case LayerType::Constant: return "Constant";
         case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32";
         case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 0661b16649..dc3c55edac 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -17,6 +17,7 @@ enum class LayerType
     Activation = FirstLayer,
     Addition,
     BatchNormalization,
+    BatchToSpaceNd,
     Constant,
     ConvertFp16ToFp32,
     ConvertFp32ToFp16,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index fb3ce43646..5d2d205534 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -12,6 +12,7 @@
 #include <cstring>
 #include <algorithm>
 #include <unordered_map>
+#include <armnn/ArmNN.hpp>
 
 namespace armnn
 {
@@ -100,6 +101,20 @@ bool IsBatchNormalizationSupported(const BackendId& backend,
                                descriptor);
 }
 
+bool IsBatchToSpaceNdSupported(const BackendId& backend,
+                               const TensorInfo& input,
+                               const TensorInfo& output,
+                               const BatchToSpaceNdDescriptor& descriptor,
+                               char* reasonIfUnsupported,
+                               size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend,
+                               IsBatchToSpaceNdSupported,
+                               input,
+                               output,
+                               descriptor);
+}
+
 bool IsConstantSupported(const BackendId& backend,
                          const TensorInfo& output,
                          char* reasonIfUnsupported,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 5c08b6677f..bd1297b550 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -9,6 +9,7 @@
 #include "layers/ActivationLayer.hpp"
 #include "layers/AdditionLayer.hpp"
 #include "layers/BatchNormalizationLayer.hpp"
+#include "layers/BatchToSpaceNdLayer.hpp"
 #include "layers/ConstantLayer.hpp"
 #include "layers/ConvertFp16ToFp32Layer.hpp"
 #include "layers/ConvertFp32ToFp16Layer.hpp"
@@ -67,6 +68,7 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
 DECLARE_LAYER(Activation)
 DECLARE_LAYER(Addition)
 DECLARE_LAYER(BatchNormalization)
+DECLARE_LAYER(BatchToSpaceNd)
 DECLARE_LAYER(Constant)
 DECLARE_LAYER(ConvertFp16ToFp32)
 DECLARE_LAYER(ConvertFp32ToFp16)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7b430c3ac5..3b3ee3146a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -351,6 +351,12 @@ IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
 {
     return m_Graph->AddLayer<InputLayer>(id, name);
 }
 
+IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+                                                   const char* name)
+{
+    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
+}
+
 IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                        const ConstTensor& weights,
                                                        const ConstTensor* biases,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 4a93dd1ee4..95cdb28bfb 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -34,6 +34,9 @@ public:
     IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
 
+    IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+                                              const char* name = nullptr) override;
+
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const char* name = nullptr) override;
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
new file mode 100644
index 0000000000..595ce4a7fe
--- /dev/null
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "BatchToSpaceNdLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+#include "LayerWithParameters.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+BatchToSpaceNdLayer::BatchToSpaceNdLayer(const armnn::BatchToSpaceNdDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::BatchToSpaceNd, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> BatchToSpaceNdLayer::CreateWorkload(const Graph& graph,
+                                                               const IWorkloadFactory& factory) const
+{
+    BatchToSpaceNdQueueDescriptor descriptor;
+
+    return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<BatchToSpaceNdLayer>(graph, m_Param, GetName());
+    return std::move(layer);
+}
+
+void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "BatchToSpaceNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(), inferredShapes[0]);
+}
+
+std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    const DataLayoutIndexed& dataLayout = m_Param.m_DataLayout;
+    const TensorShape& inputShape = inputShapes[0];
+    unsigned int inBatchSize = inputShape[0];
+    unsigned int channelSize = inputShape[dataLayout.GetChannelsIndex()];
+
+    std::vector<unsigned int> theBlockShape = m_Param.m_BlockShape;
+
+    std::vector<std::vector<unsigned int>> crops = m_Param.m_Crops;
+
+    std::vector<unsigned int> yCrops = crops[0];
+    std::vector<unsigned int> xCrops = crops[1];
+
+    unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
+    unsigned int outputHeight = theBlockShape.at(0) * inputHeight - (yCrops[0] + yCrops[1]);
+
+    unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
+    unsigned int outputWidth = theBlockShape.at(1) * inputWidth - (xCrops[0] + xCrops[1]);
+
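+    // Each spatial dimension grows by its block factor (minus the total cropping),
+    // while the batch shrinks by the product of the block factors, keeping the
+    // overall element count constant.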
+    unsigned int outputBatchSize = inBatchSize / (theBlockShape.at(0) * theBlockShape.at(1));
+
+    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    {
+        return std::vector<TensorShape>({ TensorShape({ outputBatchSize, outputHeight, outputWidth, channelSize }) });
+    }
+    else
+    {
+        return std::vector<TensorShape>({ TensorShape({ outputBatchSize, channelSize, outputHeight, outputWidth }) });
+    }
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
new file mode 100644
index 0000000000..eb5f979f3a
--- /dev/null
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class BatchToSpaceNdLayer : public LayerWithParameters<BatchToSpaceNdDescriptor>
+{
+public:
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    BatchToSpaceNdLayer* Clone(Graph& graph) const override;
+
+    void ValidateTensorShapesFromInputs() override;
+
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+protected:
+    BatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& param, const char* name);
+    ~BatchToSpaceNdLayer() = default;
+};
+
+} // namespace
diff --git a/src/backends/backendsCommon/ILayerSupport.cpp b/src/backends/backendsCommon/ILayerSupport.cpp
index ebfff5d429..2cd57b7ad7 100644
--- a/src/backends/backendsCommon/ILayerSupport.cpp
+++ b/src/backends/backendsCommon/ILayerSupport.cpp
@@ -59,6 +59,14 @@ bool ILayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool ILayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              const BatchToSpaceNdDescriptor& descriptor,
+                                              Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7c02947b32..9fbdfe94c2 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -918,4 +918,10 @@ void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     }
 }
 
-} //namespace armnn
+void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateSingleInput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 7fb8855bf6..d54a71aa8c 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -335,4 +335,8 @@ struct ConvertFp32ToFp16QueueDescriptor : QueueDescriptor
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct BatchToSpaceNdQueueDescriptor : QueueDescriptorWithParameters<BatchToSpaceNdDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 9f974522aa..ec30f34880 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -116,6 +116,18 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                                              reason);
             break;
         }
+        case LayerType::BatchToSpaceNd:
+        {
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
+
+            result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
+                                                                   OverrideDataType(output, dataType),
+                                                                   cLayer->GetParameters(),
+                                                                   reason);
+            break;
+        }
         case LayerType::Constant:
         {
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 67876e13a2..e3be9f501f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -97,6 +97,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const = 0;
 
+    virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& Info) const = 0;
+
     virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const = 0;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 2c992bc10b..25079058f6 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -91,6 +91,20 @@ struct DummyLayer
 
 };
 
+template<>
+struct DummyLayer<armnn::BatchToSpaceNdLayer>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::BatchToSpaceNdLayer* m_Layer;
+};
+
 template<>
 struct DummyLayer<armnn::ConstantLayer, void>
 {
@@ -306,6 +320,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Addition)
 
 DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
 
+DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
+
 DECLARE_LAYER_POLICY_1_PARAM(Constant)
 
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index cdc989fe6d..4a003036ca 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -6169,3 +6169,170 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(armnn::IWorkloadF
 {
     return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory);
 }
+
+namespace {
+
+template <typename T, std::size_t InputDim, std::size_t OutputDim>
+LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(armnn::IWorkloadFactory &workloadFactory,
+                                                   const armnn::DataLayout& dataLayout,
+                                                   const unsigned int *inputShape,
+                                                   const std::vector<T> &inputData,
+                                                   const std::vector<unsigned int> &blockShape,
+                                                   const std::vector<std::vector<unsigned int>> &crops,
+                                                   const unsigned int *outputShape,
+                                                   const std::vector<T> &outputData,
+                                                   float scale = 1.0f,
+                                                   int32_t offset = 0)
+{
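+    // Shared by the Float32 and QuantisedAsymm8 tests: T selects the data type, and
+    // the scale/offset arguments only affect the quantised case.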
+    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
+
+    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
+    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
+
+    inputTensorInfo.SetQuantizationScale(scale);
+    inputTensorInfo.SetQuantizationOffset(offset);
+
+    outputTensorInfo.SetQuantizationScale(scale);
+    outputTensorInfo.SetQuantizationOffset(offset);
+
+    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, OutputDim> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::BatchToSpaceNdQueueDescriptor data;
+    data.m_Parameters.m_DataLayout = dataLayout;
+    data.m_Parameters.m_BlockShape = blockShape;
+    data.m_Parameters.m_Crops = crops;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+} // anonymous namespace
+
+LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = {4, 2, 2, 1};
+    const unsigned int outputShape[] = {1, 4, 4, 1};
+
+    std::vector<float> input
+    ({
+        // Batch 0, Height 0, Width (2) x Channel (1)
+        1.0f, 3.0f,
+        // Batch 0, Height 1, Width (2) x Channel (1)
+        9.0f, 11.0f,
+
+
+        // Batch 1, Height 0, Width (2) x Channel (1)
+        2.0f, 4.0f,
+        // Batch 1, Height 1, Width (2) x Channel (1)
+        10.0f, 12.0f,
+
+
+        // Batch 2, Height 0, Width (2) x Channel (1)
+        5.0f, 7.0f,
+        // Batch 2, Height 1, Width (2) x Channel (1)
+        13.0f, 15.0f,
+
+        // Batch 3, Height 0, Width (2) x Channel (1)
+        6.0f, 8.0f,
+        // Batch 3, Height 1, Width (2) x Channel (1)
+        14.0f, 16.0f
+    });
+
+    std::vector<float> expectedOutput
+    ({
+        1.0f,  2.0f,  3.0f,  4.0f,
+        5.0f,  6.0f,  7.0f,  8.0f,
+        9.0f,  10.0f, 11.0f, 12.0f,
+        13.0f, 14.0f, 15.0f, 16.0f
+    });
+
+    std::vector<unsigned int> blockShape {2, 2};
+    std::vector<std::vector<unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
+                                             crops, outputShape, expectedOutput);
+}
+
+LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = {4, 1, 1, 1};
+    const unsigned int outputShape[] = {1, 2, 2, 1};
+
+    std::vector<float> input
+    ({
+        // Batches 0 to 3, each of Height (1) x Width (1) x Channel (1)
+        1.0f, 2.0f, 3.0f, 4.0f
+    });
+
+    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::vector<unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
+                                             crops, outputShape, expectedOutput);
+}
+
+LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = {4, 1, 1, 3};
+    const unsigned int outputShape[] = {1, 2, 2, 3};
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
+
+    std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::vector<unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
+                                             crops, outputShape, expectedOutput);
+}
+
+LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(armnn::IWorkloadFactory &workloadFactory)
+{
+    const unsigned int inputShape[] = {4, 3, 1, 1};
+    const unsigned int outputShape[] = {1, 3, 2, 2};
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
+
+    std::vector<float> expectedOutput
+    ({
+        // Batch 0, Channel 0, Height (2) x Width (2)
+        1.0f, 4.0f,
+        7.0f, 10.0f,
+
+        // Batch 0, Channel 1, Height (2) x Width (2)
+        2.0f, 5.0f,
+        8.0f, 11.0f,
+
+        // Batch 0, Channel 2, Height (2) x Width (2)
+        3.0f, 6.0f,
+        9.0f, 12.0f
+    });
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::vector<unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NCHW, inputShape, input, blockShape,
+                                             crops, outputShape, expectedOutput);
+}
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 66032c8f2a..cd8758e477 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -434,3 +434,9 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(armnn::IWorkloadFa
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(armnn::IWorkloadFactory &workloadFactory);
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index eece934ea3..0862ea163e 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -314,6 +314,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor
     return MakeWorkload<ClPadWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
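+    // No CL implementation is provided yet, so no real workload is created here.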
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 void ClWorkloadFactory::Release()
 {
     m_MemoryManager.Release();
@@ -530,6 +536,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor
     return nullptr;
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
+    return nullptr;
+}
+
 void ClWorkloadFactory::Release()
 {
 }
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index c45bc15839..6a928dbbfc 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -126,6 +126,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const override;
+
     virtual void Release() override;
 
     virtual void Acquire() override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 11b5634a79..f0d916b63b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -279,6 +279,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescript
     return MakeWorkloadHelper<NeonPadWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                     const WorkloadInfo& info) const
+{
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 void NeonWorkloadFactory::Release()
 {
     m_MemoryManager.Release();
@@ -495,6 +501,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescript
     return nullptr;
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                     const WorkloadInfo& info) const
+{
+    return nullptr;
+}
+
 void NeonWorkloadFactory::Release()
 {}
 
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 9b574f6a14..98f323afdf 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -128,6 +128,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& Info) const override;
+
     virtual void Release() override;
 
     virtual void Acquire() override;
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 0902b0fd17..b057370459 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -101,6 +101,22 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                      &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const BatchToSpaceNdDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    return (IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>) &&
+            IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                      output.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>));
+}
+
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index b161f5c7cf..2e86ecee29 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -31,6 +31,11 @@ public:
                                        const BatchNormalizationDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const BatchToSpaceNdDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index a238d5f545..afffd65285 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -270,5 +270,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescripto
     return MakeWorkload<RefPadFloat32Workload, RefPadUint8Workload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                    const WorkloadInfo& info) const
+{
+    return MakeWorkload<RefBatchToSpaceNdFloat32Workload, RefBatchToSpaceNdUint8Workload>(descriptor, info);
+}
 
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index e9b298d376..91bba84038 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -143,6 +143,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
+
+    virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const override;
 
 private:
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index cc8c24f394..7d56144f18 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -13,6 +13,7 @@ BACKEND_SOURCES := \
         RefWorkloadFactory.cpp \
         workloads/Activation.cpp \
         workloads/ArithmeticFunction.cpp \
+        workloads/BatchToSpaceNd.cpp \
        workloads/Broadcast.cpp \
        workloads/ConvImpl.cpp \
        workloads/FullyConnected.cpp \
@@ -25,6 +26,8 @@ BACKEND_SOURCES := \
        workloads/RefBaseConstantWorkload.cpp \
        workloads/RefBatchNormalizationFloat32Workload.cpp \
        workloads/RefBatchNormalizationUint8Workload.cpp \
+       workloads/RefBatchToSpaceNdFloat32Workload.cpp \
+       workloads/RefBatchToSpaceNdUint8Workload.cpp \
        workloads/RefConstantFloat32Workload.cpp \
        workloads/RefConstantUint8Workload.cpp \
        workloads/RefConvertFp16ToFp32Workload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index df0e37866d..703ec58208 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -376,4 +376,10 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNHWCUint8, SpaceToBatchNdMultiCh
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNHWCUint8, SpaceToBatchNdMultiBlockNHWCUint8Test)
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNHWCUint8, SpaceToBatchNdPaddingNHWCUint8Test)
 
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat321, BatchToSpaceNdNhwcFloat32Test1)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat322, BatchToSpaceNdNhwcFloat32Test2)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat323, BatchToSpaceNdNhwcFloat32Test3)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwFloat32Test1)
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
new file mode 100644
index 0000000000..bedf8418ef
--- /dev/null
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -0,0 +1,100 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
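+// Offset() below returns the flat buffer index of the element at
+// (batch, height, width, channels), using the dimension ordering given by the
+// DataLayoutIndexed helper (NHWC or NCHW).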
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+
+#include "RefWorkloadUtils.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <armnn/Exceptions.hpp>
+
+namespace armnn
+{
+
+inline unsigned int Offset(const TensorShape& shape, unsigned int batch, unsigned int height, unsigned int width,
+                           unsigned int channels, const DataLayoutIndexed& dataLayout)
+{
+    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    {
+        return ((batch * shape[dataLayout.GetHeightIndex()] + height) * shape[dataLayout.GetWidthIndex()] + width) *
+               shape[dataLayout.GetChannelsIndex()] + channels;
+    }
+    else
+    {
+        return ((batch * shape[dataLayout.GetChannelsIndex()] + channels) *
+               shape[dataLayout.GetHeightIndex()] + height) *
+               shape[dataLayout.GetWidthIndex()] + width;
+    }
+}
+
+void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+                    const TensorInfo& inputTensorInfo,
+                    const TensorInfo& outputTensorInfo,
+                    const std::vector<unsigned int>& blockShape,
+                    const std::vector<std::vector<unsigned int>>& cropsData,
+                    const float* inputData,
+                    float* outputData)
+{
+    TensorShape inputShape = inputTensorInfo.GetShape();
+    unsigned int inputNumDims = inputShape.GetNumDimensions();
+    if (inputNumDims != 4)
+    {
+        throw armnn::InvalidArgumentException("Expected Input with 4 Dimensions");
+    }
+
+    TensorShape outputShape = outputTensorInfo.GetShape();
+    unsigned int outputNumDims = outputShape.GetNumDimensions();
+    if (outputNumDims != 4)
+    {
+        throw armnn::InvalidArgumentException("Expected Output with 4 Dimensions");
+    }
+
+    const unsigned int inputBatchSize = inputShape[0];
+    const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
+
+    const unsigned int outputBatchSize = outputShape[0];
+    const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
+
+    const unsigned int blockShapeHeight = blockShape[0];
+    const unsigned int blockShapeWidth = blockShape[1];
+
+    const unsigned int cropsTop = cropsData[0][0];
+    const unsigned int cropsLeft = cropsData[1][0];
+
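+    // Each input batch writes into output batch (inBatch % outputBatchSize); the quotient
+    // (inBatch / outputBatchSize) selects the block cell, which offsets the scaled spatial
+    // coordinates before the crops are subtracted. Cropped-away positions fail the bounds
+    // checks below (via unsigned wrap-around) and are skipped.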
+    for (unsigned int inBatch = 0; inBatch < inputBatchSize; ++inBatch)
+    {
+        const unsigned int outBatch = inBatch % outputBatchSize;
+        const unsigned int spatialOffset = inBatch / outputBatchSize;
+
+        for (unsigned int inH = 0; inH < inputTensorInfo.GetShape()[dataLayout.GetHeightIndex()]; ++inH)
+        {
+            const unsigned int outH = inH * blockShapeHeight + spatialOffset / blockShapeWidth - cropsTop;
+
+            if (outH >= outputHeight)
+            {
+                continue;
+            }
+
+            for (unsigned int inW = 0; inW < inputTensorInfo.GetShape()[dataLayout.GetWidthIndex()]; ++inW)
+            {
+                const unsigned int outW = inW * blockShapeWidth + spatialOffset % blockShapeWidth - cropsLeft;
+
+                if (outW >= outputWidth)
+                {
+                    continue;
+                }
+
+                for (unsigned int c = 0; c < channels; c++)
+                {
+                    unsigned int outOffset = Offset(outputShape, outBatch, outH, outW, c, dataLayout);
+                    unsigned int inOffset = Offset(inputShape, inBatch, inH, inW, c, dataLayout);
+                    outputData[outOffset] = inputData[inOffset];
+                }
+            }
+        }
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
new file mode 100644
index 0000000000..7923ceadd0
--- /dev/null
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+#include <vector>
+
+namespace armnn
+{
+
+void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+                    const TensorInfo& inputTensorInfo,
+                    const TensorInfo& outputTensorInfo,
+                    const std::vector<unsigned int>& blockShape,
+                    const std::vector<std::vector<unsigned int>>& cropsData,
+                    const float* inputData,
+                    float* outputData);
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 4cef2d0771..1c38509ca0 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -9,6 +9,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     ArithmeticFunction.cpp
     ArithmeticFunction.hpp
     BatchNormImpl.hpp
+    BatchToSpaceNd.cpp
+    BatchToSpaceNd.hpp
     Broadcast.cpp
     Broadcast.hpp
     ConvImpl.cpp
@@ -32,6 +34,10 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefBatchNormalizationFloat32Workload.hpp
     RefBatchNormalizationUint8Workload.cpp
     RefBatchNormalizationUint8Workload.hpp
+    RefBatchToSpaceNdFloat32Workload.cpp
+    RefBatchToSpaceNdFloat32Workload.hpp
+    RefBatchToSpaceNdUint8Workload.cpp
+    RefBatchToSpaceNdUint8Workload.hpp
     RefConstantFloat32Workload.cpp
     RefConstantFloat32Workload.hpp
     RefConstantUint8Workload.cpp
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
new file mode 100644
index 0000000000..bf246c272f
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefBatchToSpaceNdFloat32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdFloat32Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const float* inputData = GetInputTensorDataFloat(0, m_Data);
+    float* outputData = GetOutputTensorDataFloat(0, m_Data);
+
+    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+                   m_Data.m_Parameters.m_Crops, inputData, outputData);
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
new file mode 100644
index 0000000000..4977772c82
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn {
+
+class RefBatchToSpaceNdFloat32Workload : public Float32Workload<BatchToSpaceNdQueueDescriptor>
+{
+
+public:
+    using Float32Workload<BatchToSpaceNdQueueDescriptor>::Float32Workload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
new file mode 100644
index 0000000000..a66bcd42de
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefBatchToSpaceNdUint8Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdUint8Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
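+    // Quantised path: dequantize the input to float, run the float reference
+    // implementation into a staging buffer, then requantize into the uint8 output.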
+    auto dequantizedInputData = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
+
+    std::vector<float> results(outputInfo.GetNumElements());
+    BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+                   m_Data.m_Parameters.m_Crops, dequantizedInputData.data(), results.data());
+
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
new file mode 100644
index 0000000000..1f221c2f47
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefBatchToSpaceNdUint8Workload : public Uint8Workload<BatchToSpaceNdQueueDescriptor>
+{
+
+public:
+    using Uint8Workload<BatchToSpaceNdQueueDescriptor>::Uint8Workload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 03907a6b91..5ea7fe4b58 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -54,4 +54,6 @@
 #include "RefConvertFp32ToFp16Workload.hpp"
 #include "RefMeanUint8Workload.hpp"
 #include "RefMeanFloat32Workload.hpp"
-#include "RefPadWorkload.hpp"
\ No newline at end of file
+#include "RefPadWorkload.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"
--
cgit v1.2.1
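
For reference, a minimal sketch (not part of the patch) of how the new layer could be
driven through the public INetwork API. The descriptor values match the
BatchToSpaceNdNhwcFloat32Test1 case above; the variable names and the surrounding
runtime setup are illustrative only:

    // Block shape {2, 2}, no cropping, NHWC layout: rearranges 4 batches of
    // 2x2x1 into a single 1x4x4x1 image.
    armnn::BatchToSpaceNdDescriptor desc({2, 2}, {{0, 0}, {0, 0}});
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::INetworkPtr net = armnn::INetwork::Create();
    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
    armnn::IConnectableLayer* b2s    = net->AddBatchToSpaceNdLayer(desc, "batchToSpaceNd");
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(b2s->GetInputSlot(0));
    b2s->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({4, 2, 2, 1}, armnn::DataType::Float32));
    b2s->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({1, 4, 4, 1}, armnn::DataType::Float32));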