From 6abc7ee0cceeb269081cc23149f4190730815e51 Mon Sep 17 00:00:00 2001
From: Teresa Charlin <teresa.charlinreyes@arm.com>
Date: Tue, 22 Feb 2022 17:32:27 +0000
Subject: IVGCVSW-6803 Add int32 support for CONCATENATION in CpuRef

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Id9decafcebb6dbcac3a03516281524f22419cbfb
---
 .../test/layerTests/ConcatTestImpl.cpp        | 253 +++++++++++----------
 .../test/layerTests/ConcatTestImpl.hpp        |   5 +
 src/backends/reference/RefLayerSupport.cpp    |   5 +-
 src/backends/reference/test/RefLayerTests.cpp |   1 +
 4 files changed, 144 insertions(+), 120 deletions(-)

diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 2563fcf14b..a7a2364475 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -420,6 +420,133 @@ template void Concatenate(
 //
 // Implementation templates
 //
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
+LayerTestResult<T, 3> ConcatTestImpl(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+
+    IgnoreUnused(memoryManager);
+
+    unsigned int outputWidth = 3;
+    unsigned int outputHeight = 6;
+    unsigned int outputChannels = 3;
+
+    unsigned int inputWidth1 = 3;
+    unsigned int inputHeight1 = 6;
+    unsigned int inputChannels1 = 2;
+
+    unsigned int inputWidth2 = 3;
+    unsigned int inputHeight2 = 6;
+    unsigned int inputChannels2 = 1;
+
+    // Define the tensor descriptors.
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, ArmnnType);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, ArmnnType);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, ArmnnType);
+
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::vector<T> expectedOutput =
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
+
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54
+    };
+
+    std::vector<T> input1 =
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36
+    };
+
+    std::vector<T> input2 =
+    {
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54,
+    };
+
+    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+
+    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+
+    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+
+    std::unique_ptr<ITensorHandle> inputHandle1 =
+        subTensorsSupported ?
+            tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+            tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+
+    std::unique_ptr<ITensorHandle> inputHandle2 =
+        subTensorsSupported ?
+            tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+            tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
+
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+    CopyDataToITensorHandle(inputHandle2.get(), input2.data());
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<T, 3>(actualOutput,
+                                 expectedOutput,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
+}
 
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 1> Concat1dTestImpl(
@@ -2119,125 +2246,15 @@ LayerTestResult<float, 3> ConcatTest(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    IgnoreUnused(memoryManager);
-
-    unsigned int outputWidth = 3;
-    unsigned int outputHeight = 6;
-    unsigned int outputChannels = 3;
-
-    unsigned int inputWidth1 = 3;
-    unsigned int inputHeight1 = 6;
-    unsigned int inputChannels1 = 2;
-
-    unsigned int inputWidth2 = 3;
-    unsigned int inputHeight2 = 6;
-    unsigned int inputChannels2 = 1;
-
-    // Define the tensor descriptors.
-    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
-    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
-    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
-
-    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
-
-    std::vector<float> expectedOutput =
-    {
-        1.0f, 2.0f, 3.0f,
-        4.0f, 5.0f, 6.0f,
-        7.0f, 8.0f, 9.0f,
-        10.0f, 11.0f, 12.0f,
-        13.0f, 14.0f, 15.0f,
-        16.0f, 17.0f, 18.0f,
-
-        19.0f, 20.0f, 21.0f,
-        22.0f, 23.0f, 24.0f,
-        25.0f, 26.0f, 27.0f,
-        28.0f, 29.0f, 30.0f,
-        31.0f, 32.0f, 33.0f,
-        34.0f, 35.0f, 36.0f,
-
-        37.0f, 38.0f, 39.0f,
-        40.0f, 41.0f, 42.0f,
-        43.0f, 44.0f, 45.0f,
-        46.0f, 47.0f, 48.0f,
-        49.0f, 50.0f, 51.0f,
-        52.0f, 53.0f, 54.0f
-    };
-
-    std::vector<float> input1 =
-    {
-        1.0f, 2.0f, 3.0f,
-        4.0f, 5.0f, 6.0f,
-        7.0f, 8.0f, 9.0f,
-        10.0f, 11.0f, 12.0f,
-        13.0f, 14.0f, 15.0f,
-        16.0f, 17.0f, 18.0f,
-
-        19.0f, 20.0f, 21.0f,
-        22.0f, 23.0f, 24.0f,
-        25.0f, 26.0f, 27.0f,
-        28.0f, 29.0f, 30.0f,
-        31.0f, 32.0f, 33.0f,
-        34.0f, 35.0f, 36.0f
-    };
-
-    std::vector<float> input2 =
-    {
-        37.0f, 38.0f, 39.0f,
-        40.0f, 41.0f, 42.0f,
-        43.0f, 44.0f, 45.0f,
-        46.0f, 47.0f, 48.0f,
-        49.0f, 50.0f, 51.0f,
-        52.0f, 53.0f, 54.0f,
-    };
-
-    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
-    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
-
-    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
-    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
-
-    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
-
-    std::unique_ptr<ITensorHandle> inputHandle1 =
-        subTensorsSupported ?
-            tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
-            tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
-
-    std::unique_ptr<ITensorHandle> inputHandle2 =
-        subTensorsSupported ?
-            tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
-            tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
-
-    ConcatQueueDescriptor data;
-    WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    data.m_ViewOrigins.push_back(window1);
-    data.m_ViewOrigins.push_back(window2);
-
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
-
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
-    CopyDataToITensorHandle(inputHandle2.get(), input2.data());
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+    return ConcatTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);
+}
 
-    return LayerTestResult<float, 3>(actualOutput,
-                                     expectedOutput,
-                                     outputHandle->GetShape(),
-                                     outputTensorInfo.GetShape());
+LayerTestResult<int32_t, 3> ConcatInt32Test(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return ConcatTestImpl<DataType::Signed32>(workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<float, 1> Concat1dTest(
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp
index 99adcea9f5..3ca9522dd0 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp
@@ -36,6 +36,11 @@ LayerTestResult<armnn::Half, 3> ConcatFloat16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::ITensorHandleFactory& tensorHandleFactory);
 
+LayerTestResult<int32_t, 3> ConcatInt32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
 LayerTestResult<uint8_t, 3> ConcatUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index f5798c886f..b55adfa958 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -879,14 +879,15 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
     IgnoreUnused(descriptor);
 
     bool supported = true;
-    std::array<DataType,6> supportedTypes =
+    std::array<DataType,7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
         DataType::QAsymmU8,
-        DataType::QSymmS16
+        DataType::QSymmS16,
+        DataType::Signed32
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 69694e0275..d07aea1b9c 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -790,6 +790,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterInt16, CopyViaSplitterInt16Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatBFloat16, ConcatBFloat16Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatFloat16, ConcatFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatInt32, ConcatInt32Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentQParams,
                               ConcatUint8DifferentQParamsTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint16, ConcatUint16Test)
--
cgit v1.2.1