From f77cab57b3eca1425384d4d5bfe44d76fc7023b9 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 1 Jun 2023 16:15:13 +0100
Subject: IVGCVSW-7785 Extend support for 3D tensors BATCH_TO_SPACE and SPACE_TO_BATCH in CpuRef

* Both layers previously assumed 4D tensors; 3D tensors are now supported too.
* Remove some unnecessary includes
* Add Unit Tests

Signed-off-by: Teresa Charlin
Change-Id: I7bdd11e4936a27cd97ec65fd915e6ccaa1494cff
---
 src/backends/backendsCommon/WorkloadData.cpp       | 105 +++++++++++++-----
 .../test/layerTests/BatchToSpaceNdTestImpl.hpp     |  27 +++++
 .../test/layerTests/SpaceToBatchNdTestImpl.cpp     | 121 +++++++++++++++++++++
 .../test/layerTests/SpaceToBatchNdTestImpl.hpp     |  15 +++
 src/backends/reference/RefLayerSupport.cpp         |  14 ---
 src/backends/reference/test/RefLayerTests.cpp      |   8 ++
 .../reference/workloads/BatchToSpaceNd.cpp         | 108 ++++++++++--------
 .../reference/workloads/BatchToSpaceNd.hpp         |  22 ++--
 .../workloads/RefBatchToSpaceNdWorkload.cpp        |   9 +-
 .../workloads/RefBatchToSpaceNdWorkload.hpp        |   6 +-
 .../workloads/RefSpaceToBatchNdWorkload.cpp        |  11 +-
 .../workloads/RefSpaceToBatchNdWorkload.hpp        |   6 +-
 .../reference/workloads/SpaceToBatchNd.cpp         |  65 +++++++----
 .../reference/workloads/SpaceToBatchNd.hpp         |   3 +-
 14 files changed, 380 insertions(+), 140 deletions(-)

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 6a5963ddcb..d4ae08d874 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1815,47 +1815,66 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
 
-    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
-    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
-
-    if (m_Parameters.m_BlockShape.size() != 2)
-    {
-        throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
-    }
-
     if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
         throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
                                        "dimensions as Block Shape.");
     }
 
-    const TensorShape& inputShape = inputTensorInfo.GetShape();
-
-    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
-    std::pair<unsigned int, unsigned int> widthPad  = m_Parameters.m_PadList[1];
+    if (m_Parameters.m_BlockShape.size() == 2)
+    {
+        ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
+        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
+    }
+    else if (m_Parameters.m_BlockShape.size() == 1)
+    {
+        ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 3, "input");
+        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 3, "output");
+    }
+    else
+    {
+        throw InvalidArgumentException(descriptorName + ": Invalid Block and Crops size.");
+    }
 
+    // Check that input plus padding and output have the same number of elements.
     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+    const unsigned int inputHeight = inputTensorInfo.GetShape()[dimensionIndices.GetHeightIndex()] +
+                                     m_Parameters.m_PadList[0].first + m_Parameters.m_PadList[0].second;
+    const unsigned int inputWidth  = (inputTensorInfo.GetNumDimensions() == 3) ? 1 :
+                                     inputTensorInfo.GetShape()[dimensionIndices.GetWidthIndex()] +
+                                     m_Parameters.m_PadList[1].first + m_Parameters.m_PadList[1].second;
 
-    const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
-                                    widthPad.first + widthPad.second;
-    const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
-                                     heightPad.first + heightPad.second;
+    const int channelsIndex_int = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : -1;
+    const unsigned int channelsIndex = channelsIndex_int < 0 ?
+        static_cast<unsigned int>(channelsIndex_int) + inputTensorInfo.GetNumDimensions()
+        : static_cast<unsigned int>(channelsIndex_int);
 
-    const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
-                                          inputShape[dimensionIndices.GetChannelsIndex()];
-    const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
+    const unsigned int numInputElements = inputTensorInfo.GetShape()[0] *
+                                          inputHeight *
+                                          inputWidth *
+                                          inputTensorInfo.GetShape()[channelsIndex];
 
-    if (numOutputElements != numInputElements)
+    if (outputTensorInfo.GetNumElements() != numInputElements)
     {
         throw InvalidArgumentException(descriptorName + ": Input tensor has " +
-            to_string(numInputElements) + " after padding but output tensor has " +
-            to_string(numOutputElements) + " elements.");
+                                       to_string(numInputElements) + " elements after padding but output tensor has " +
+                                       to_string(outputTensorInfo.GetNumElements()) + " elements.");
     }
 
-    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
+    // In a 4D tensor, there are 2 spatial dimensions (H and W), so the loop runs twice.
+    // In a 3D tensor, there is 1 spatial dimension, so the loop runs once.
+    unsigned int firstSpatialDimension = m_Parameters.m_DataLayout == DataLayout::NCHW ? 2 : 1;
+    for (unsigned int i = 0; i < m_Parameters.m_BlockShape.size(); ++i)
     {
-        throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
-                                       "divisible by Block Shape in all spatial dimensions");
+        unsigned int spatialDimension = firstSpatialDimension + i;
+        auto inputSize = inputTensorInfo.GetShape()[spatialDimension] +
+                         m_Parameters.m_PadList[i].first +
+                         m_Parameters.m_PadList[i].second;
+        if (inputSize % m_Parameters.m_BlockShape[i] != 0)
+        {
+            throw InvalidArgumentException(descriptorName + ": Input dimension size after padding must be "
+                                           "divisible by Block Shape in dimension: " +
+                                           to_string(spatialDimension) + ".");
+        }
     }
 
     std::vector<DataType> supportedTypes =
@@ -2472,6 +2491,42 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
 
+    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_Crops.size())
+    {
+        throw InvalidArgumentException(descriptorName + ": Crops must contain the same number of "
+                                       "dimensions as Block Shape.");
+    }
+
+    if (m_Parameters.m_BlockShape.size() == 2)
+    {
+        ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
+        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
+    }
+    else if (m_Parameters.m_BlockShape.size() == 1)
+    {
+        ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 3, "input");
+        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 3, "output");
+    }
+    else
+    {
+        throw InvalidArgumentException(descriptorName + ": Invalid Block and Crops size.");
+    }
+
+    // In a 4D tensor, there are 2 spatial dimensions (H and W), so the loop runs twice.
+    // In a 3D tensor, there is 1 spatial dimension, so the loop runs once.
+    unsigned int firstSpatialDimension = m_Parameters.m_DataLayout == DataLayout::NCHW ? 2 : 1;
+    for (unsigned int i = 0; i < m_Parameters.m_BlockShape.size(); ++i)
+    {
+        unsigned int spatialDimension = firstSpatialDimension + i;
+        unsigned int cropSize = m_Parameters.m_Crops[i].first + m_Parameters.m_Crops[i].second;
+        unsigned int outputSize = inputTensorInfo.GetShape()[spatialDimension] * m_Parameters.m_BlockShape[i];
+        if (cropSize > outputSize)
+        {
+            throw InvalidArgumentException(descriptorName + ": CropSize must be less than or equal to the uncropped "
+                                           "outputSize in dimension: " + to_string(spatialDimension) + ".");
+        }
+    }
+
     std::vector<DataType> supportedTypes =
     {
         DataType::BFloat16,
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index b3007771c9..4f8b7d0193 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -278,6 +278,33 @@ LayerTestResult<T, 4> BatchToSpaceNdNhwcTest7(
         crops, outputShape, expectedOutput);
 }
 
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> BatchToSpaceNdNhwcTest8(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const unsigned int inputShape[] = {4, 2, 1};
+    const unsigned int outputShape[] = {1, 8, 1};
+
+    std::vector<float> input({
+        1.0f, 2.0f, 3.0f, 4.0f,
+        5.0f, 6.0f, 7.0f, 8.0f
+    });
+
+    std::vector<float> expectedOutput({
+        1.0f, 3.0f, 5.0f, 7.0f,
+        2.0f, 4.0f, 6.0f, 8.0f
+    });
+
+    std::vector<unsigned int> blockShape {4};
+    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}};
+
+    return BatchToSpaceNdHelper<ArmnnType, 3, 3>(workloadFactory, memoryManager, tensorHandleFactory,
+                                                 armnn::DataLayout::NHWC, inputShape, input, blockShape,
+                                                 crops, outputShape, expectedOutput);
+}
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> BatchToSpaceNdNchwTest1(
     armnn::IWorkloadFactory &workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 92876e18bd..4e40692c8c 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -88,6 +88,59 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
         outputTensorInfo.GetShape());
 }
 
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> SpaceToBatchNd3DTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    armnn::TensorInfo& inputTensorInfo,
+    armnn::TensorInfo& outputTensorInfo,
+    std::vector<float>& inputData,
+    std::vector<float>& outputExpectedData,
+    armnn::SpaceToBatchNdQueueDescriptor descriptor,
+    const float qScale = 1.0f,
+    const int32_t qOffset = 0)
+{
+    IgnoreUnused(memoryManager);
+
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateWorkload(armnn::LayerType::SpaceToBatchNd, + descriptor, + info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), input.data()); + + workload->Execute(); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + template> LayerTestResult SpaceToBatchNdSimpleTest( armnn::IWorkloadFactory& workloadFactory, @@ -253,6 +306,44 @@ LayerTestResult SpaceToBatchNdPaddingTest( inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } +template> +LayerTestResult SpaceToBatchNdSimple3DTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + armnn::DataLayout dataLayout = armnn::DataLayout::NHWC) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = {1, 8, 1}; + unsigned int outputShape[] = {4, 2, 1}; + + armnn::SpaceToBatchNdQueueDescriptor desc; + desc.m_Parameters.m_DataLayout = dataLayout; + desc.m_Parameters.m_BlockShape = {4}; + desc.m_Parameters.m_PadList = {{0, 0}}; + + inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType); + + std::vector input = std::vector( + { + 1.0f, 3.0f, 5.0f, 7.0f, + 2.0f, 4.0f, 6.0f, 8.0f + }); + + std::vector outputExpected = std::vector( + { + 1.0f, 2.0f, 3.0f, 4.0f, + 5.0f, 6.0f, 7.0f, 8.0f + }); + + return SpaceToBatchNd3DTestImpl( + workloadFactory, memoryManager, tensorHandleFactory, + inputTensorInfo, outputTensorInfo, input, outputExpected, desc); +} + template> LayerTestResult SpaceToBatchNdSimpleNhwcTest( armnn::IWorkloadFactory& workloadFactory, @@ -463,6 +554,16 @@ LayerTestResult SpaceToBatchNdPaddingNhwcFloat32Test( tensorHandleFactory); } +LayerTestResult SpaceToBatchNdSimpleNhwc3DFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return SpaceToBatchNdSimple3DTest(workloadFactory, + memoryManager, + tensorHandleFactory); +} + LayerTestResult SpaceToBatchNdSimpleNhwcFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -503,6 +604,16 @@ LayerTestResult SpaceToBatchNdPaddingNhwcFloat16Test( tensorHandleFactory); } +LayerTestResult SpaceToBatchNdSimpleNhwc3DFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return SpaceToBatchNdSimple3DTest(workloadFactory, + memoryManager, + tensorHandleFactory); +} + LayerTestResult SpaceToBatchNdSimpleNhwcUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -543,6 +654,16 @@ LayerTestResult SpaceToBatchNdPaddingNhwcUint8Test( tensorHandleFactory); } +LayerTestResult SpaceToBatchNdSimpleNhwc3DUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const 
armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return SpaceToBatchNdSimple3DTest(workloadFactory, + memoryManager, + tensorHandleFactory); +} + LayerTestResult SpaceToBatchNdSimpleUint16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp index 7768b162f2..4e87d6ab6c 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp @@ -91,6 +91,11 @@ LayerTestResult SpaceToBatchNdPaddingNhwcFloat32Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory); +LayerTestResult SpaceToBatchNdSimpleNhwc3DFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + LayerTestResult SpaceToBatchNdSimpleNhwcFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -111,6 +116,11 @@ LayerTestResult SpaceToBatchNdPaddingNhwcFloat16Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory); +LayerTestResult SpaceToBatchNdSimpleNhwc3DFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + LayerTestResult SpaceToBatchNdSimpleNhwcUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -131,6 +141,11 @@ LayerTestResult SpaceToBatchNdPaddingNhwcUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory); +LayerTestResult SpaceToBatchNdSimpleNhwc3DUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + LayerTestResult SpaceToBatchNdSimpleUint16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index cbc6723dbc..81e5c837a5 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -794,20 +794,6 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, "Reference BatchToSpaceNd: input and output types mismatched."); - supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4), - reasonIfUnsupported, - CreateIncorrectDimensionsErrorMsg(4, - output.GetNumDimensions(), - batchToSpaceNdLayerStr, - outputTensorStr).data()); - - supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4), - reasonIfUnsupported, - CreateIncorrectDimensionsErrorMsg(4, - input.GetNumDimensions(), - batchToSpaceNdLayerStr, - inputTensorStr).data()); - return supported; } diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 8b89743870..6e697723e9 100644 --- 
a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -1938,16 +1938,19 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcFloat32, SpaceToBatchNdSim ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcFloat32, SpaceToBatchNdMultiChannelsNhwcFloat32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcFloat32, SpaceToBatchNdMultiBlockNhwcFloat32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcFloat32, SpaceToBatchNdPaddingNhwcFloat32Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwc3DFloat32, SpaceToBatchNdSimpleNhwc3DFloat32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcFloat16, SpaceToBatchNdSimpleNhwcFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcFloat16, SpaceToBatchNdMultiChannelsNhwcFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcFloat16, SpaceToBatchNdMultiBlockNhwcFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcFloat16, SpaceToBatchNdPaddingNhwcFloat16Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwc3DFloat16, SpaceToBatchNdSimpleNhwc3DFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcUint8, SpaceToBatchNdSimpleNhwcUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcUint8, SpaceToBatchNdMultiChannelsNhwcUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcUint8, SpaceToBatchNdMultiBlockNhwcUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcUint8, SpaceToBatchNdPaddingNhwcUint8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwc3DUint8, SpaceToBatchNdSimpleNhwc3DUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleUint16, SpaceToBatchNdSimpleUint16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsUint16, SpaceToBatchNdMultiChannelsUint16Test) @@ -1967,6 +1970,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat32_4, BatchToSpaceNdNhwcTes ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat32_5, BatchToSpaceNdNhwcTest5) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat32_6, BatchToSpaceNdNhwcTest6) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat32_7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat32_3D, BatchToSpaceNdNhwcTest8) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_2, BatchToSpaceNdNhwcTest2) @@ -1975,6 +1979,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_4, BatchToSpaceNdNhwcTes ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat16_3D, BatchToSpaceNdNhwcTest8) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2) @@ -1983,6 +1988,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt4, BatchToSpaceNdNhwcTest4) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt6, BatchToSpaceNdNhwcTest6) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt_3D, BatchToSpaceNdNhwcTest8) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint2, 
BatchToSpaceNdNhwcTest2) @@ -1991,6 +1997,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint4, BatchToSpaceNdNhwcTest4< ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint5, BatchToSpaceNdNhwcTest5) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint6, BatchToSpaceNdNhwcTest6) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint_3D, BatchToSpaceNdNhwcTest8) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_2, BatchToSpaceNdNhwcTest2) @@ -1999,6 +2006,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_4, BatchToSpaceNdNhwcTe ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_5, BatchToSpaceNdNhwcTest5) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_6, BatchToSpaceNdNhwcTest6) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcQsymm16_3D, BatchToSpaceNdNhwcTest8) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat16_1, BatchToSpaceNdNchwTest1) ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat16_2, BatchToSpaceNdNchwTest2) diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp index bf7de1b04c..ebe9d2cfd5 100644 --- a/src/backends/reference/workloads/BatchToSpaceNd.cpp +++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp @@ -1,85 +1,105 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017-2020,2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "BatchToSpaceNd.hpp" -#include "RefWorkloadUtils.hpp" - -#include - -#include +#include using namespace armnnUtils; namespace armnn { -inline unsigned int Offset(const TensorShape& shape, unsigned int batch, unsigned int height, unsigned int width, - unsigned int channels, const DataLayoutIndexed& dataLayout) +unsigned int Offset(const TensorShape& shape, + unsigned int batch, + unsigned int height, + unsigned int width, + unsigned int channels, + const DataLayoutIndexed& dataLayout) { - if (dataLayout.GetDataLayout() == DataLayout::NHWC) + // 3D Tensors + unsigned int channelDimension3D = dataLayout.GetDataLayout() == DataLayout::NCHW ? 
1 : 2; + if (shape.GetNumDimensions() == 3) { - return ((batch * shape[dataLayout.GetHeightIndex()] + height) * shape[dataLayout.GetWidthIndex()] + width) * - shape[dataLayout.GetChannelsIndex()] + channels; + return (batch * shape[dataLayout.GetHeightIndex()] + height) * shape[channelDimension3D] + channels; + } + // 4D Tensors + else if (shape.GetNumDimensions() == 4) + { + if (dataLayout.GetDataLayout() == DataLayout::NHWC) + { + return ((batch * shape[dataLayout.GetHeightIndex()] + height) * + shape[dataLayout.GetWidthIndex()] + width) * + shape[dataLayout.GetChannelsIndex()] + channels; + } + else + { + return ((batch * shape[dataLayout.GetChannelsIndex()] + channels) * + shape[dataLayout.GetHeightIndex()] + height) * + shape[dataLayout.GetWidthIndex()] + width; + } } else { - return ((batch * shape[dataLayout.GetChannelsIndex()] + channels) * - shape[dataLayout.GetHeightIndex()] + height) * - shape[dataLayout.GetWidthIndex()] + width; + throw InvalidArgumentException("Tensor rank must be either 3 or 4", CHECK_LOCATION()); } } -void BatchToSpaceNd(const DataLayoutIndexed& dataLayout, - const TensorInfo& inputTensorInfo, - const TensorInfo& outputTensorInfo, - const std::vector& blockShape, - const std::vector>& cropsData, - Decoder& inputDecoder, - Encoder& outputEncoder) +void BatchToSpaceNd(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const BatchToSpaceNdDescriptor& params, + Decoder& inputData, + Encoder& outputData) { - TensorShape inputShape = inputTensorInfo.GetShape(); - - ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions"); - - TensorShape outputShape = outputTensorInfo.GetShape(); + unsigned int rank = inputInfo.GetNumDimensions(); + if (rank != 3 && rank != 4 ) + { + throw InvalidArgumentException("Tensor rank must be either 3 or 4, but it is " + std::to_string(rank), + CHECK_LOCATION()); + } - ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions"); + DataLayoutIndexed dataLayout = params.m_DataLayout; + unsigned int channelDimension3D = params.m_DataLayout == DataLayout::NCHW ? 1 : 2; - const unsigned int inputBatchSize = inputShape[0]; - const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()]; + TensorShape inputShape = inputInfo.GetShape(); + TensorShape outputShape = outputInfo.GetShape(); + const unsigned int inputBatchSize = inputShape[0]; const unsigned int outputBatchSize = outputShape[0]; - const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()]; - const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()]; - ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries"); + const unsigned int channels = (rank == 3) ? inputShape[channelDimension3D] + : inputShape[dataLayout.GetChannelsIndex()]; - const unsigned int blockShapeHeight = blockShape[0]; - const unsigned int blockShapeWidth = blockShape[1]; + const unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()]; + const unsigned int inputWidth = (rank == 3) ? 1 : inputShape[dataLayout.GetWidthIndex()]; + const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()]; + const unsigned int outputWidth = (rank == 3) ? 1 : outputShape[dataLayout.GetWidthIndex()]; - ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries"); + const unsigned int blockHeight = params.m_BlockShape[0]; + const unsigned int blockWidth = (rank == 3) ? 
1 : params.m_BlockShape[1]; - const unsigned int cropsTop = cropsData[0].first; - const unsigned int cropsLeft = cropsData[1].first; + const unsigned int cropsTop = params.m_Crops[0].first; + const unsigned int cropsLeft = (rank == 3) ? 0 : params.m_Crops[1].first; for (unsigned int inBatch = 0; inBatch < inputBatchSize; ++inBatch) { const unsigned int outBatch = inBatch % outputBatchSize; const unsigned int spatialOffset = inBatch / outputBatchSize; - for (unsigned int inH = 0; inH < inputTensorInfo.GetShape()[dataLayout.GetHeightIndex()]; ++inH) { - const unsigned int outH = inH * blockShapeHeight + spatialOffset / blockShapeWidth - cropsTop; + for (unsigned int inH = 0; inH < inputHeight; ++inH) + { + const unsigned int outH = inH * blockHeight + spatialOffset / blockWidth - cropsTop; if (outH >= outputHeight) { continue; } - for (unsigned int inW = 0; inW < inputTensorInfo.GetShape()[dataLayout.GetWidthIndex()]; ++inW) { - const unsigned int outW = inW * blockShapeWidth + spatialOffset % blockShapeWidth - cropsLeft; + for (unsigned int inW = 0; inW < inputWidth; ++inW) + { + const unsigned int outW = inW * blockWidth + spatialOffset % blockWidth - cropsLeft; if (outW >= outputWidth) { @@ -91,9 +111,9 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout, unsigned int outOffset = Offset(outputShape, outBatch, outH, outW, c, dataLayout); unsigned int inOffset = Offset(inputShape, inBatch, inH, inW, c, dataLayout); - outputEncoder[outOffset]; - inputDecoder[inOffset]; - outputEncoder.Set(inputDecoder.Get()); + outputData[outOffset]; + inputData[inOffset]; + outputData.Set(inputData.Get()); } } } diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp index 0fcef58554..acacda4e86 100644 --- a/src/backends/reference/workloads/BatchToSpaceNd.hpp +++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017-2019,2021,2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -9,21 +9,15 @@ #include "Decoders.hpp" #include "Encoders.hpp" -#include - -#include - -#include -#include +#include namespace armnn { -void BatchToSpaceNd(const armnnUtils::DataLayoutIndexed& dataLayout, - const TensorInfo& inputTensorInfo, - const TensorInfo& outputTensorInfo, - const std::vector& blockShape, - const std::vector>& cropsData, - Decoder& inputDecoder, - Encoder& outputEncoder); +void BatchToSpaceNd(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const BatchToSpaceNdDescriptor& params, + Decoder& inputData, + Encoder& outputData); + } // namespace armnn diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp index 72c7a7687e..6bb8aff72c 100644 --- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp +++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp @@ -1,11 +1,11 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // -#include "BatchToSpaceNd.hpp" -#include "Profiling.hpp" #include "RefBatchToSpaceNdWorkload.hpp" +#include "BatchToSpaceNd.hpp" + #include "RefWorkloadUtils.hpp" namespace armnn @@ -32,8 +32,7 @@ void RefBatchToSpaceNdWorkload::Execute(std::vector inputs, std: std::unique_ptr> inputDecoder = MakeDecoder(inputInfo, inputs[0]->Map()); std::unique_ptr> outputEncoder = MakeEncoder(outputInfo, outputs[0]->Map()); - BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape, - m_Data.m_Parameters.m_Crops, *inputDecoder, *outputEncoder); + BatchToSpaceNd(inputInfo, outputInfo, m_Data.m_Parameters, *inputDecoder, *outputEncoder); } diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp index ac6aad3eb2..5fb5835b68 100644 --- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp +++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp @@ -1,14 +1,14 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once #include "RefBaseWorkload.hpp" -#include -namespace armnn { +namespace armnn +{ class RefBatchToSpaceNdWorkload : public RefBaseWorkload { diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp index 6aa422afdc..d29c2c801e 100644 --- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp +++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -7,7 +7,6 @@ #include "SpaceToBatchNd.hpp" #include "RefWorkloadUtils.hpp" -#include namespace armnn { @@ -28,12 +27,12 @@ void RefSpaceToBatchNdWorkload::Execute(std::vector inputs, std: ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToBatchNdWorkload_Execute"); const TensorInfo& inputInfo = GetTensorInfo(inputs[0]); - std::unique_ptr> decoder = MakeDecoder(inputInfo, inputs[0]->Map()); - const TensorInfo& outputInfo = GetTensorInfo(outputs[0]); - std::unique_ptr> encoder = MakeEncoder(outputInfo, outputs[0]->Map()); - SpaceToBatchNd(inputInfo, outputInfo, m_Data.m_Parameters, *decoder, *encoder); + std::unique_ptr> inputDecoder = MakeDecoder(inputInfo, inputs[0]->Map()); + std::unique_ptr> outputEncoder = MakeEncoder(outputInfo, outputs[0]->Map()); + + SpaceToBatchNd(inputInfo, outputInfo, m_Data.m_Parameters, *inputDecoder, *outputEncoder); } } //namespace armnn diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp index f2c87682db..f9d75ee4d6 100644 --- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp +++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp @@ -1,13 +1,11 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved. 
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include "RefBaseWorkload.hpp"
 
-#include <armnn/TypesUtils.hpp>
-
 namespace armnn
 {
 
@@ -15,8 +13,10 @@ class RefSpaceToBatchNdWorkload : public RefBaseWorkload<SpaceToBatchNdQueueDesc
 {
 public:
     using RefBaseWorkload<SpaceToBatchNdQueueDescriptor>::RefBaseWorkload;
+
     void Execute() const override;
     void ExecuteAsync(ExecutionData& executionData) override;
+
 private:
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
 };
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index b6bab17367..c3f022c6a6 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2019,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -19,15 +19,29 @@ unsigned int GetOffset(const TensorShape& shape,
                        unsigned int c,
                        const DataLayoutIndexed& dataLayout)
 {
-    if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+    // 3D Tensors
+    unsigned int channelDimension3D = dataLayout.GetDataLayout() == DataLayout::NCHW ? 1 : 2;
+    if (shape.GetNumDimensions() == 3)
     {
-        return ((b * shape[dataLayout.GetHeightIndex()] + h) * shape[dataLayout.GetWidthIndex()] + w) *
-               shape[dataLayout.GetChannelsIndex()] + c;
+        return (b * shape[dataLayout.GetHeightIndex()] + h) * shape[channelDimension3D] + c;
+    }
+    // 4D Tensors
+    else if (shape.GetNumDimensions() == 4)
+    {
+        if (dataLayout.GetDataLayout() == DataLayout::NHWC)
+        {
+            return ((b * shape[dataLayout.GetHeightIndex()] + h) * shape[dataLayout.GetWidthIndex()] + w) *
+                   shape[dataLayout.GetChannelsIndex()] + c;
+        }
+        else
+        {
+            return ((b * shape[dataLayout.GetChannelsIndex()] + c) * shape[dataLayout.GetHeightIndex()] + h) *
+                   shape[dataLayout.GetWidthIndex()] + w;
+        }
     }
     else
     {
-        return ((b * shape[dataLayout.GetChannelsIndex()] + c) * shape[dataLayout.GetHeightIndex()] + h) *
-               shape[dataLayout.GetWidthIndex()] + w;
+        throw InvalidArgumentException("Tensor rank must be either 3 or 4", CHECK_LOCATION());
     }
 }
@@ -37,37 +51,46 @@ void SpaceToBatchNd(const TensorInfo& inputInfo,
                     Decoder<float>& inputData,
                     Encoder<float>& outputData)
 {
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 3 && rank != 4)
+    {
+        throw InvalidArgumentException("Tensor rank must be either 3 or 4, but it is " + std::to_string(rank),
+                                       CHECK_LOCATION());
+    }
+
     DataLayoutIndexed dataLayout = params.m_DataLayout;
+    unsigned int channelDimension3D = params.m_DataLayout == DataLayout::NCHW ? 1 : 2;
 
     const TensorShape& inputShape = inputInfo.GetShape();
     const TensorShape& outputShape = outputInfo.GetShape();
 
-    const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
+    const unsigned int inputBatchSize  = inputShape[0];
+    const unsigned int outputBatchSize = outputShape[0];
 
-    const unsigned int inputBatchSize = inputShape[0];
-    const unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
-    const unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
+    const unsigned int channels = (rank == 3) ? inputShape[channelDimension3D]
+                                              : inputShape[dataLayout.GetChannelsIndex()];
 
-    const unsigned int outputBatchSize = outputShape[0];
+    const unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
+    const unsigned int inputWidth  = (rank == 3) ? 1 : inputShape[dataLayout.GetWidthIndex()];
 
     const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
-    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
+    const unsigned int outputWidth  = (rank == 3) ? 1 : outputShape[dataLayout.GetWidthIndex()];
 
     const unsigned int blockHeight = params.m_BlockShape[0];
-    const unsigned int blockWidth = params.m_BlockShape[1];
+    const unsigned int blockWidth  = (rank == 3) ? 1 : params.m_BlockShape[1];
 
-    const unsigned int paddingTop = params.m_PadList[0].first;
-    const unsigned int paddingLeft = params.m_PadList[1].first;
+    const unsigned int paddingTop  = params.m_PadList[0].first;
+    const unsigned int paddingLeft = (rank == 3) ? 0 : params.m_PadList[1].first;
 
-    for (unsigned int outB = 0; outB < outputBatchSize; outB++)
+    for (unsigned int outB = 0; outB < outputBatchSize; ++outB)
     {
         unsigned int inB = outB % inputBatchSize;
 
         unsigned int shiftW = (outB / inputBatchSize) % blockWidth;
         unsigned int shiftH = (outB / inputBatchSize) / blockWidth;
 
-        for (unsigned int outH = 0; outH < outputHeight; outH++)
+        for (unsigned int outH = 0; outH < outputHeight; ++outH)
         {
-            for (unsigned int outW = 0; outW < outputWidth; outW++)
+            for (unsigned int outW = 0; outW < outputWidth; ++outW)
             {
                 if (outH * blockHeight + shiftH < paddingTop ||
                     outH * blockHeight + shiftH >= paddingTop + inputHeight ||
@@ -117,10 +140,4 @@ void SpaceToBatchNd(const TensorInfo& inputInfo,
         }
     }
 }
 
-void SpaceToBatchNd(const TensorInfo& inputInfo,
-                    const TensorInfo& outputInfo,
-                    const SpaceToBatchNdDescriptor& params,
-                    Decoder<float>& inputData,
-                    Encoder<float>& outData);
-
 } //namespace armnn
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.hpp b/src/backends/reference/workloads/SpaceToBatchNd.hpp
index 57c9b6bc25..7de34ee59a 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.hpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2019,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -10,7 +10,6 @@
 #include "Encoders.hpp"
 
 #include <armnn/Descriptors.hpp>
-#include "armnn/Tensor.hpp"
 
 namespace armnn
 {
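The rank-3 mapping this patch adds can be sanity-checked outside ArmNN. The following is an illustrative, self-contained C++ sketch, not ArmNN code: it assumes an NHC layout, a one-element block shape, and zero padding, and the function name SpaceToBatchNd3D is hypothetical. It reproduces the SpaceToBatchNdSimple3DTest vectors above, where a {1, 8, 1} input with block shape {4} becomes a {4, 2, 1} output.

#include <cstdio>
#include <vector>

// Rank-3 SpaceToBatchNd, NHC layout, block shape {block}, no padding:
// output[b][h][c] = input[b % inN][h * block + b / inN][c]
std::vector<float> SpaceToBatchNd3D(const std::vector<float>& in,
                                    unsigned int inN, unsigned int inH,
                                    unsigned int channels, unsigned int block)
{
    const unsigned int outN = inN * block; // batch grows by the block factor
    const unsigned int outH = inH / block; // height shrinks by the same factor
    std::vector<float> out(in.size());
    for (unsigned int b = 0; b < outN; ++b)
    {
        const unsigned int srcB   = b % inN; // originating input batch
        const unsigned int shiftH = b / inN; // offset of this batch within the block
        for (unsigned int h = 0; h < outH; ++h)
        {
            const unsigned int srcH = h * block + shiftH;
            for (unsigned int c = 0; c < channels; ++c)
            {
                out[(b * outH + h) * channels + c] = in[(srcB * inH + srcH) * channels + c];
            }
        }
    }
    return out;
}

int main()
{
    // Same vectors as SpaceToBatchNdSimple3DTest: input {1, 8, 1}, block {4} -> output {4, 2, 1}.
    const std::vector<float> input = {1, 3, 5, 7, 2, 4, 6, 8};
    for (float v : SpaceToBatchNd3D(input, 1, 8, 1, 4))
    {
        std::printf("%.0f ", v); // prints: 1 2 3 4 5 6 7 8
    }
    std::printf("\n");
    return 0;
}

BatchToSpaceNd with the same block shape inverts this mapping, which is why BatchToSpaceNdNhwcTest8 uses the same two vectors with input and expected output swapped.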