ArmNN
 22.05
ConcatTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::BFloat16, 3 > ConcatBFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 3 > ConcatInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2260 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2264 {
2265  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2266 }
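These helpers are not called directly by users; the backend unit suites (the TEST_SUITE references above) pass in the backend's workload factory, memory manager and tensor handle factory and then compare the returned actual and expected data. A minimal sketch of such a call, assuming the factories have already been created for the backend under test and that LayerTestResult exposes m_ActualData/m_ExpectedData/m_ActualShape/m_ExpectedShape (the comparison below is illustrative only; the real suites use their own helpers such as CompareTensors()):

    LayerTestResult<float, 1> result = Concat1dTest(workloadFactory, memoryManager, tensorHandleFactory);
    bool ok = (result.m_ActualData  == result.m_ExpectedData)
           && (result.m_ActualShape == result.m_ExpectedShape);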

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2869 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2873 {
2874  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2875 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2284 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2288 {
2289  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2290  tensorHandleFactory, 0.0f, 0);
2291 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2893 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2897 {
2898  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2899  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2900 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2268 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2272 {
2273  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2274 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2877 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2881 {
2882  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2883 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2293 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2297 {
2298  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory,
2299  memoryManager,
2300  tensorHandleFactory,
2301  0.0f,
2302  0);
2303 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2902 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2906 {
2907  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2908  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2909 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2276 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2280 {
2281  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2282 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2885 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2889 {
2890  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2891 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2331 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2335 {
2336  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2337  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2338 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2937 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2941 {
2942  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2943 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2305 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2309 {
2310  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2311 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2911 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2915 {
2916  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2917 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2340 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2344 {
2345  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2346  tensorHandleFactory, 0.0f, 0);
2347 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2945 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2949 {
2950  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2951  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2952 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2313 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2317 {
2318  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2319 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2919 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2923 {
2924  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2925 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2349 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2354 {
2355  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2356  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.0f, 0);
2357 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2954 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2959 {
2960  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2961  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2962 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2321 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2326 {
2327  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
2328  useSubtensor, 0.0f, 0);
2329 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2927 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2932 {
2933  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2934  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2935 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2393 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2397 {
2398  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2399  tensorHandleFactory, 0.0f, 0);
2400 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2997 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3001 {
3002  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
3003  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
3004 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2402 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2406 {
2407  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2408  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2409 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 3006 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3010 {
3011  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
3012  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
3013 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2411 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2415 {
2416  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2417  tensorHandleFactory, 0.0f, 0);
2418 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 3015 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3019 {
3020  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
3021  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
3022 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2420 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2425 {
2426  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2427  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, useSubtensor);
2428 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 3024 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3029 {
3030  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
3031  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
3032 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2359 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2363 {
2364  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2365 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2964 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2968 {
2969  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2970 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2367 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2371 {
2372  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2373 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2972 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2976 {
2977  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2978 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2375 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2379 {
2380  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2381 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2980 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2984 {
2985  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2986 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2383 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2388 {
2389  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2390  tensorHandleFactory, 0.0f, 0, useSubtensor);
2391 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2988 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2992 {
2993  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2994  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
2995 }

◆ ConcatBFloat16Test()

LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2438 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2442 {
2443  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2444 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2082 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), armnn::CreateDescriptorForConcatenation(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2087 {
2088  IgnoreUnused(memoryManager);
2089 
2090  // Defines the tensor descriptors.
2091  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
2092  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
2093  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
2094 
2095  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
2096 
2097  // Quantized input1 tensor.
2098  const float inputScale1 = 0.5f;
2099  const int32_t inputOffset1 = 5;
2100 
2101  std::vector<T> input1 =
2102  {
2103  1, 2, 3,
2104  4, 5, 6,
2105  7, 8, 9,
2106  10, 11, 12,
2107  13, 14, 15,
2108  16, 17, 18,
2109 
2110  19, 20, 21,
2111  22, 23, 24,
2112  25, 26, 27,
2113  28, 29, 30,
2114  31, 32, 33,
2115  34, 35, 36
2116  };
2117 
2118  // Quantized input2 tensor.
2119  const float inputScale2 = 0.2f;
2120  const int32_t inputOffset2 = 10;
2121 
2122  std::vector<T> input2 =
2123  {
2124  37, 38, 39,
2125  40, 41, 42,
2126  43, 44, 45,
2127  46, 47, 48,
2128  49, 50, 51,
2129  52, 53, 54
2130  };
2131 
2132  // Quantized output tensor.
2133  const float outputScale = 0.1f;
2134  const int32_t outputOffset = 20;
2135 
2136  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2137 
2138  std::vector<T> expectedOutput =
2139  {
2140  0, 5, 74,
2141  10, 15, 76,
2142  20, 25, 78,
2143  30, 35, 80,
2144  40, 45, 82,
2145  50, 55, 84,
2146 
2147  60, 65, 86,
2148  70, 75, 88,
2149  80, 85, 90,
2150  90, 95, 92,
2151  100, 105, 94,
2152  110, 115, 96,
2153 
2154  120, 125, 98,
2155  130, 135, 100,
2156  140, 145, 102,
2157  150, 155, 104,
2158  160, 165, 106,
2159  170, 175, 108
2160  };
2161 
2162  outputTensorInfo.SetQuantizationScale(outputScale);
2163  outputTensorInfo.SetQuantizationOffset(outputOffset);
2164  inputTensorInfo1.SetQuantizationScale(inputScale1);
2165  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2166  inputTensorInfo2.SetQuantizationScale(inputScale2);
2167  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2168 
2169  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2170  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2171 
2172  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2173  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2174 
2175  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2176 
2177  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2178 
2179  std::unique_ptr<ITensorHandle> inputHandle1 =
2180  subTensorsSupported ?
2181  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2182  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2183 
2184  std::unique_ptr<ITensorHandle> inputHandle2 =
2185  subTensorsSupported ?
2186  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2187  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2188 
2189  ConcatQueueDescriptor data;
2190  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2191  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2192  data.m_Parameters = desc;
2193 
2194  WorkloadInfo info;
2195  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2196  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2197  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2198 
2199  data.m_ViewOrigins.push_back(window1);
2200  data.m_ViewOrigins.push_back(window2);
2201 
2202  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2203 
2204  inputHandle1->Allocate();
2205  inputHandle2->Allocate();
2206  outputHandle->Allocate();
2207 
2208  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2209  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2210 
2211  workload->PostAllocationConfigure();
2212  workload->Execute();
2213 
2214  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2215 
2216  return LayerTestResult<T, 3>(actualOutput,
2217  expectedOutput,
2218  outputHandle->GetShape(),
2219  outputTensorInfo.GetShape());
2220 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
std::vector< ViewOrigin > m_ViewOrigins
An OriginsDescriptor for the ConcatLayer.
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
virtual bool SupportsSubTensors() const =0
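The expectedOutput values above follow from dequantizing each input with its own (scale, offset) and re-quantizing the result with the output's (scale, offset). A standalone sketch of that arithmetic, not part of the test code (the Requantize helper is hypothetical):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Dequantize with the input's parameters, then re-quantize with the output's.
    int32_t Requantize(int32_t q, float inScale, int32_t inOffset, float outScale, int32_t outOffset)
    {
        float real = inScale * static_cast<float>(q - inOffset);
        return static_cast<int32_t>(std::round(real / outScale)) + outOffset;
    }

    int main()
    {
        assert(Requantize( 1, 0.5f,  5, 0.1f, 20) ==  0);  // first element of input1 -> expectedOutput value 0
        assert(Requantize(37, 0.2f, 10, 0.1f, 20) == 74);  // first element of input2 -> expectedOutput value 74
        return 0;
    }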

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2430 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2434 {
2435  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2436 }

◆ ConcatInt32Test()

LayerTestResult<int32_t, 3> ConcatInt32Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2252 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2256 {
2257  return ConcatTestImpl<DataType::Signed32>(workloadFactory, memoryManager, tensorHandleFactory);
2258 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2244 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2248 {
2249  return ConcatTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);
2250 }

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2730 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2734 {
2735  IgnoreUnused(memoryManager);
2736 
2737  unsigned int outputWidth = 3;
2738  unsigned int outputHeight = 6;
2739  unsigned int outputChannels = 3;
2740 
2741  unsigned int inputWidth1 = 3;
2742  unsigned int inputHeight1 = 6;
2743  unsigned int inputChannels1 = 2;
2744 
2745  unsigned int inputWidth2 = 3;
2746  unsigned int inputHeight2 = 6;
2747  unsigned int inputChannels2 = 1;
2748 
2749  // Defines the tensor descriptors.
2750  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2751  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2752  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2753 
2754  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2755  const float scale = 0.13497836f;
2756  const int32_t offset = -7;
2757 
2758  outputTensorInfo.SetQuantizationScale(scale);
2759  outputTensorInfo.SetQuantizationOffset(offset);
2760  inputTensorInfo1.SetQuantizationScale(scale);
2761  inputTensorInfo1.SetQuantizationOffset(offset);
2762  inputTensorInfo2.SetQuantizationScale(scale);
2763  inputTensorInfo2.SetQuantizationOffset(offset);
2764 
2765  std::vector<uint16_t> actualOutput(outputTensorInfo.GetNumElements());
2766 
2767  std::vector<uint16_t> expectedOutput =
2768  {
2769  1, 2, 3,
2770  4, 5, 6,
2771  7, 8, 9,
2772  10, 11, 12,
2773  13, 14, 15,
2774  16, 17, 18,
2775 
2776  19, 20, 21,
2777  22, 23, 24,
2778  25, 26, 27,
2779  28, 29, 30,
2780  31, 32, 33,
2781  34, 35, 36,
2782 
2783  37, 38, 39,
2784  40, 41, 42,
2785  43, 44, 45,
2786  46, 47, 48,
2787  49, 50, 51,
2788  52, 53, 54
2789  };
2790 
2791  std::vector<uint16_t> input1 =
2792  {
2793  1, 2, 3,
2794  4, 5, 6,
2795  7, 8, 9,
2796  10, 11, 12,
2797  13, 14, 15,
2798  16, 17, 18,
2799 
2800  19, 20, 21,
2801  22, 23, 24,
2802  25, 26, 27,
2803  28, 29, 30,
2804  31, 32, 33,
2805  34, 35, 36,
2806  };
2807 
2808  std::vector<uint16_t> input2 =
2809  {
2810  37, 38, 39,
2811  40, 41, 42,
2812  43, 44, 45,
2813  46, 47, 48,
2814  49, 50, 51,
2815  52, 53, 54,
2816  };
2817 
2818  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2819  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2820 
2821  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2822  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2823 
2824 
2825  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2826 
2827  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2828 
2829  std::unique_ptr<ITensorHandle> inputHandle1 =
2830  subTensorsSupported ?
2831  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2832  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2833 
2834  std::unique_ptr<ITensorHandle> inputHandle2 =
2835  subTensorsSupported ?
2836  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2837  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2838 
2839 
2840  ConcatQueueDescriptor data;
2841  WorkloadInfo info;
2842  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2843  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2844  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2845 
2846  data.m_ViewOrigins.push_back(window1);
2847  data.m_ViewOrigins.push_back(window2);
2848 
2849  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2850 
2851  inputHandle1->Allocate();
2852  inputHandle2->Allocate();
2853  outputHandle->Allocate();
2854 
2855  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2856  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2857 
2858  workload->PostAllocationConfigure();
2859  workload->Execute();
2860 
2861  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2862 
2863  return LayerTestResult<uint16_t, 3>(actualOutput,
2864  expectedOutput,
2865  outputHandle->GetShape(),
2866  outputTensorInfo.GetShape());
2867 }
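Here the two inputs are concatenated along dimension 0 (channels): wOrigin1 = {0,0,0} places the two channels of input1 at channels 0 and 1 of the {3,6,3} output, and wOrigin2 = {2,0,0} places the single channel of input2 at channel 2. A small sketch of what a view origin expresses, assuming a plain row-major (CHW) copy rather than the backend's actual implementation:

    #include <array>
    #include <cstdint>
    #include <vector>

    // Copy an input tensor into an output tensor starting at the given origin (CHW layout).
    void PlaceView(std::vector<uint16_t>& out, const std::array<unsigned, 3>& outShape,
                   const std::vector<uint16_t>& in, const std::array<unsigned, 3>& inShape,
                   const std::array<unsigned, 3>& origin)
    {
        for (unsigned c = 0; c < inShape[0]; ++c)
            for (unsigned h = 0; h < inShape[1]; ++h)
                for (unsigned w = 0; w < inShape[2]; ++w)
                {
                    unsigned outIdx = ((origin[0] + c) * outShape[1] + (origin[1] + h)) * outShape[2]
                                      + (origin[2] + w);
                    unsigned inIdx  = (c * inShape[1] + h) * inShape[2] + w;
                    out[outIdx] = in[inIdx];
                }
    }

    // PlaceView(output, {3,6,3}, input1, {2,6,3}, {0,0,0});  // input1 fills channels 0 and 1
    // PlaceView(output, {3,6,3}, input2, {1,6,3}, {2,0,0});  // input2 fills channel 2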

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2446 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2450 {
2451  IgnoreUnused(memoryManager);
2452 
2453  unsigned int outputWidth = 3;
2454  unsigned int outputHeight = 6;
2455  unsigned int outputChannels = 3;
2456 
2457  unsigned int inputWidth1 = 3;
2458  unsigned int inputHeight1 = 6;
2459  unsigned int inputChannels1 = 2;
2460 
2461  unsigned int inputWidth2 = 3;
2462  unsigned int inputHeight2 = 6;
2463  unsigned int inputChannels2 = 1;
2464 
2465  // Defines the tensor descriptors.
2466  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2467  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2468  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2469 
2470  // Quantized input1 tensor. Range [-3, 1]
2471  const float inputScale1 = 0.015686f;
2472  const int32_t inputOffset1 = 192;
2473 
2474  std::vector<uint8_t> input1 =
2475  {
2476  1, 2, 3,
2477  4, 5, 6,
2478  7, 8, 9,
2479  10, 11, 12,
2480  13, 14, 15,
2481  16, 17, 18,
2482 
2483  19, 20, 21,
2484  22, 23, 24,
2485  25, 26, 27,
2486  28, 29, 30,
2487  31, 32, 33,
2488  34, 35, 36
2489  };
2490 
2491  // Quantized input2 tensor. Range [-1, 4]
2492  const float inputScale2 = 0.019608f;
2493  const int32_t inputOffset2 = 50;
2494 
2495  std::vector<uint8_t> input2 =
2496  {
2497  37, 38, 39,
2498  40, 41, 42,
2499  43, 44, 45,
2500  46, 47, 48,
2501  49, 50, 51,
2502  52, 53, 54
2503  };
2504 
2505  // Output has the same quantization parameters as input1,
2506  // so only the requantization of input2 is required.
2507  const float outputScale = 0.015686f;
2508  const int32_t outputOffset = 192;
2509 
2510  std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
2511 
2512  std::vector<uint8_t> expectedOutput =
2513  {
2514  1, 2, 3,
2515  4, 5, 6,
2516  7, 8, 9,
2517  10, 11, 12,
2518  13, 14, 15,
2519  16, 17, 18,
2520 
2521  19, 20, 21,
2522  22, 23, 24,
2523  25, 26, 27,
2524  28, 29, 30,
2525  31, 32, 33,
2526  34, 35, 36,
2527 
2528  176, 177, 178,
2529  179, 181, 182,
2530  183, 184, 186,
2531  187, 188, 189,
2532  191, 192, 193,
2533  195, 196, 197
2534  };
2535 
2536  outputTensorInfo.SetQuantizationScale(outputScale);
2537  outputTensorInfo.SetQuantizationOffset(outputOffset);
2538  inputTensorInfo1.SetQuantizationScale(inputScale1);
2539  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2540  inputTensorInfo2.SetQuantizationScale(inputScale2);
2541  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2542 
2543  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2544  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2545 
2546  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2547  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2548 
2549  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2550 
2551  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2552 
2553  std::unique_ptr<ITensorHandle> inputHandle1 =
2554  subTensorsSupported ?
2555  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2556  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2557 
2558  std::unique_ptr<ITensorHandle> inputHandle2 =
2559  subTensorsSupported ?
2560  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2561  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2562 
2563  ConcatQueueDescriptor data;
2564  WorkloadInfo info;
2565  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2566  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2567  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2568 
2569  data.m_ViewOrigins.push_back(window1);
2570  data.m_ViewOrigins.push_back(window2);
2571 
2572  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2573 
2574  inputHandle1->Allocate();
2575  inputHandle2->Allocate();
2576  outputHandle->Allocate();
2577 
2578  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2579  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2580 
2581  workload->PostAllocationConfigure();
2582  workload->Execute();
2583 
2584  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2585 
2586  return LayerTestResult<uint8_t, 3>(actualOutput,
2587  expectedOutput,
2588  outputHandle->GetShape(),
2589  outputTensorInfo.GetShape());
2590 }
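The last 18 values of expectedOutput show input2 being re-quantized into the output's quantization space, using the same dequantize/re-quantize arithmetic sketched under ConcatDifferentInputOutputQParamTest(): for the first value of input2, 0.019608 * (37 - 50) = -0.254904, and round(-0.254904 / 0.015686) + 192 = -16 + 192 = 176, which is the first element of the third slice of expectedOutput. The first 36 values pass through unchanged because input1 and the output share identical quantization parameters.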

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2592 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2596 {
2597  IgnoreUnused(memoryManager);
2598 
2599  unsigned int outputWidth = 3;
2600  unsigned int outputHeight = 6;
2601  unsigned int outputChannels = 3;
2602 
2603  unsigned int inputWidth1 = 3;
2604  unsigned int inputHeight1 = 6;
2605  unsigned int inputChannels1 = 2;
2606 
2607  unsigned int inputWidth2 = 3;
2608  unsigned int inputHeight2 = 6;
2609  unsigned int inputChannels2 = 1;
2610 
2611  // Defines the tensor descriptors.
2612  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2613  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2614  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2615 
2616  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2617  const float scale = 0.13497836f;
2618  const int32_t offset = -7;
2619 
2620  outputTensorInfo.SetQuantizationScale(scale);
2621  outputTensorInfo.SetQuantizationOffset(offset);
2622  inputTensorInfo1.SetQuantizationScale(scale);
2623  inputTensorInfo1.SetQuantizationOffset(offset);
2624  inputTensorInfo2.SetQuantizationScale(scale);
2625  inputTensorInfo2.SetQuantizationOffset(offset);
2626 
2627  std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
2628 
2629  std::vector<uint8_t> expectedOutput =
2630  {
2631  1, 2, 3,
2632  4, 5, 6,
2633  7, 8, 9,
2634  10, 11, 12,
2635  13, 14, 15,
2636  16, 17, 18,
2637 
2638  19, 20, 21,
2639  22, 23, 24,
2640  25, 26, 27,
2641  28, 29, 30,
2642  31, 32, 33,
2643  34, 35, 36,
2644 
2645  37, 38, 39,
2646  40, 41, 42,
2647  43, 44, 45,
2648  46, 47, 48,
2649  49, 50, 51,
2650  52, 53, 54
2651  };
2652 
2653  std::vector<uint8_t> input1 =
2654  {
2655  1, 2, 3,
2656  4, 5, 6,
2657  7, 8, 9,
2658  10, 11, 12,
2659  13, 14, 15,
2660  16, 17, 18,
2661 
2662  19, 20, 21,
2663  22, 23, 24,
2664  25, 26, 27,
2665  28, 29, 30,
2666  31, 32, 33,
2667  34, 35, 36
2668  };
2669 
2670  std::vector<uint8_t> input2 =
2671  {
2672  37, 38, 39,
2673  40, 41, 42,
2674  43, 44, 45,
2675  46, 47, 48,
2676  49, 50, 51,
2677  52, 53, 54
2678  };
2679 
2680  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2681  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2682 
2683  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2684  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2685 
2686  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2687 
2688  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2689 
2690  std::unique_ptr<ITensorHandle> inputHandle1 =
2691  subTensorsSupported ?
2692  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2693  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2694 
2695  std::unique_ptr<ITensorHandle> inputHandle2 =
2696  subTensorsSupported ?
2697  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2698  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2699 
2700 
2701  ConcatQueueDescriptor data;
2702  WorkloadInfo info;
2703  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2704  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2705  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2706 
2707  data.m_ViewOrigins.push_back(window1);
2708  data.m_ViewOrigins.push_back(window2);
2709 
2710  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2711 
2712  inputHandle1->Allocate();
2713  inputHandle2->Allocate();
2714  outputHandle->Allocate();
2715 
2716  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2717  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2718 
2719  workload->PostAllocationConfigure();
2720  workload->Execute();
2721 
2722  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2723 
2724  return LayerTestResult<uint8_t, 3>(actualOutput,
2725  expectedOutput,
2726  outputHandle->GetShape(),
2727  outputTensorInfo.GetShape());
2728 }