ArmNN 22.02
ConcatTestImpl.hpp File Reference


Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::BFloat16, 3 > ConcatBFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2243 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2247 {
2248  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2249 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2852 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2856 {
2857  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2858 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2267 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2271 {
2272  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2273  tensorHandleFactory, 0.0f, 0);
2274 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2876 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2880 {
2881  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2882  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2883 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2251 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2255 {
2256  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2257 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2860 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2864 {
2865  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2866 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2276 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2280 {
2281  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory,
2282  memoryManager,
2283  tensorHandleFactory,
2284  0.0f,
2285  0);
2286 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2885 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2889 {
2890  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2891  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2892 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2259 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2263 {
2264  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2265 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2868 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2872 {
2873  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2874 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2314 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2318 {
2319  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2320  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2321 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2920 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2924 {
2925  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2926 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2288 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2292 {
2293  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2294 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2894 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2898 {
2899  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2900 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2323 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2327 {
2328  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2329  tensorHandleFactory, 0.0f, 0);
2330 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2928 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2932 {
2933  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2934  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2935 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2296 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2300 {
2301  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2302 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2902 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2906 {
2907  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2908 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2332 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2337 {
2338  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2339  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.0f, 0);
2340 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2937 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2942 {
2943  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2944  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2945 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2304 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2309 {
2310  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
2311  useSubtensor, 0.0f, 0);
2312 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2910 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2915 {
2916  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2917  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2918 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2376 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2380 {
2381  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2382  tensorHandleFactory, 0.0f, 0);
2383 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2980 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2984 {
2985  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2986  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2987 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2385 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2389 {
2390  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2391  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2392 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2989 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2993 {
2994  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2995  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2996 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2394 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2398 {
2399  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2400  tensorHandleFactory, 0.0f, 0);
2401 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2998 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3002 {
3003  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
3004  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
3005 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2403 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2408 {
2409  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2410  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, useSubtensor);
2411 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 3007 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3012 {
3013  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
3014  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
3015 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2342 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2346 {
2347  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2348 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2947 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2951 {
2952  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2953 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2350 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2354 {
2355  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2356 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2955 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2959 {
2960  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2961 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2358 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2362 {
2363  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2364 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2963 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2967 {
2968  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2969 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2366 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2371 {
2372  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2373  tensorHandleFactory, 0.0f, 0, useSubtensor);
2374 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2971 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2975 {
2976  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2977  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
2978 }

◆ ConcatBFloat16Test()

LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2421 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2425 {
2426  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2427 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 1955 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), armnn::CreateDescriptorForConcatenation(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

1960 {
1961  IgnoreUnused(memoryManager);
1962 
1963  // Defines the tensor descriptors.
1964  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1965  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1966  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1967 
1968  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1969 
1970  // Quantized input1 tensor.
1971  const float inputScale1 = 0.5f;
1972  const int32_t inputOffset1 = 5;
1973 
1974  std::vector<T> input1 =
1975  {
1976  1, 2, 3,
1977  4, 5, 6,
1978  7, 8, 9,
1979  10, 11, 12,
1980  13, 14, 15,
1981  16, 17, 18,
1982 
1983  19, 20, 21,
1984  22, 23, 24,
1985  25, 26, 27,
1986  28, 29, 30,
1987  31, 32, 33,
1988  34, 35, 36
1989  };
1990 
1991  // Quantized input2 tensor.
1992  const float inputScale2 = 0.2f;
1993  const int32_t inputOffset2 = 10;
1994 
1995  std::vector<T> input2 =
1996  {
1997  37, 38, 39,
1998  40, 41, 42,
1999  43, 44, 45,
2000  46, 47, 48,
2001  49, 50, 51,
2002  52, 53, 54
2003  };
2004 
2005  // Quantized output tensor.
2006  const float outputScale = 0.1f;
2007  const int32_t outputOffset = 20;
2008 
2009  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2010 
2011  std::vector<T> expectedOutput =
2012  {
2013  0, 5, 74,
2014  10, 15, 76,
2015  20, 25, 78,
2016  30, 35, 80,
2017  40, 45, 82,
2018  50, 55, 84,
2019 
2020  60, 65, 86,
2021  70, 75, 88,
2022  80, 85, 90,
2023  90, 95, 92,
2024  100, 105, 94,
2025  110, 115, 96,
2026 
2027  120, 125, 98,
2028  130, 135, 100,
2029  140, 145, 102,
2030  150, 155, 104,
2031  160, 165, 106,
2032  170, 175, 108
2033  };
2034 
2035  outputTensorInfo.SetQuantizationScale(outputScale);
2036  outputTensorInfo.SetQuantizationOffset(outputOffset);
2037  inputTensorInfo1.SetQuantizationScale(inputScale1);
2038  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2039  inputTensorInfo2.SetQuantizationScale(inputScale2);
2040  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2041 
2042  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2043  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2044 
2045  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2046  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2047 
2048  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2049 
2050  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2051 
2052  std::unique_ptr<ITensorHandle> inputHandle1 =
2053  subTensorsSupported ?
2054  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2055  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2056 
2057  std::unique_ptr<ITensorHandle> inputHandle2 =
2058  subTensorsSupported ?
2059  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2060  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2061 
2062  ConcatQueueDescriptor data;
2063  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2064  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2065  data.m_Parameters = desc;
2066 
2067  WorkloadInfo info;
2068  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2069  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2070  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2071 
2072  data.m_ViewOrigins.push_back(window1);
2073  data.m_ViewOrigins.push_back(window2);
2074 
2075  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2076 
2077  inputHandle1->Allocate();
2078  inputHandle2->Allocate();
2079  outputHandle->Allocate();
2080 
2081  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2082  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2083 
2084  workload->PostAllocationConfigure();
2085  workload->Execute();
2086 
2087  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2088 
2089  return LayerTestResult<T, 3>(actualOutput,
2090  expectedOutput,
2091  outputHandle->GetShape(),
2092  outputTensorInfo.GetShape());
2093 }
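The expectedOutput values above are obtained by dequantizing each input element with its own scale and offset (real = scale * (q - offset)) and requantizing it with the output's scale and offset. A minimal stand-alone sketch of that arithmetic, checking the first element of each input against expectedOutput; the Requantize helper is illustrative and not part of ConcatTestImpl.cpp:

#include <cassert>
#include <cmath>
#include <cstdint>

// Dequantize with the input parameters, then quantize with the output parameters.
int32_t Requantize(int32_t q, float inScale, int32_t inOffset, float outScale, int32_t outOffset)
{
    const float real = inScale * static_cast<float>(q - inOffset);
    return static_cast<int32_t>(std::round(real / outScale)) + outOffset;
}

int main()
{
    // input1[0] = 1  (scale 0.5, offset 5)  -> real -2.0 -> 0   == expectedOutput[0]
    assert(Requantize(1, 0.5f, 5, 0.1f, 20) == 0);
    // input2[0] = 37 (scale 0.2, offset 10) -> real  5.4 -> 74  == expectedOutput[2]
    assert(Requantize(37, 0.2f, 10, 0.1f, 20) == 74);
    return 0;
}

Because the views are concatenated along dimension 2 (wOrigin2 = { 0, 0, 2 }), each output row holds two requantized input1 values followed by one requantized input2 value, which is why expectedOutput starts 0, 5, 74.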

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2413 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2417 {
2418  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2419 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2117 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2121 {
2122  IgnoreUnused(memoryManager);
2123 
2124  unsigned int outputWidth = 3;
2125  unsigned int outputHeight = 6;
2126  unsigned int outputChannels = 3;
2127 
2128  unsigned int inputWidth1 = 3;
2129  unsigned int inputHeight1 = 6;
2130  unsigned int inputChannels1 = 2;
2131 
2132  unsigned int inputWidth2 = 3;
2133  unsigned int inputHeight2 = 6;
2134  unsigned int inputChannels2 = 1;
2135 
2136  // Define the tensor descriptors.
2137  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2138  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2139  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2140 
2141  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
2142 
2143  std::vector<float> expectedOutput =
2144  {
2145  1.0f, 2.0f, 3.0f,
2146  4.0f, 5.0f, 6.0f,
2147  7.0f, 8.0f, 9.0f,
2148  10.0f, 11.0f, 12.0f,
2149  13.0f, 14.0f, 15.0f,
2150  16.0f, 17.0f, 18.0f,
2151 
2152  19.0f, 20.0f, 21.0f,
2153  22.0f, 23.0f, 24.0f,
2154  25.0f, 26.0f, 27.0f,
2155  28.0f, 29.0f, 30.0f,
2156  31.0f, 32.0f, 33.0f,
2157  34.0f, 35.0f, 36.0f,
2158 
2159  37.0f, 38.0f, 39.0f,
2160  40.0f, 41.0f, 42.0f,
2161  43.0f, 44.0f, 45.0f,
2162  46.0f, 47.0f, 48.0f,
2163  49.0f, 50.0f, 51.0f,
2164  52.0f, 53.0f, 54.0f
2165  };
2166 
2167  std::vector<float> input1 =
2168  {
2169  1.0f, 2.0f, 3.0f,
2170  4.0f, 5.0f, 6.0f,
2171  7.0f, 8.0f, 9.0f,
2172  10.0f, 11.0f, 12.0f,
2173  13.0f, 14.0f, 15.0f,
2174  16.0f, 17.0f, 18.0f,
2175 
2176  19.0f, 20.0f, 21.0f,
2177  22.0f, 23.0f, 24.0f,
2178  25.0f, 26.0f, 27.0f,
2179  28.0f, 29.0f, 30.0f,
2180  31.0f, 32.0f, 33.0f,
2181  34.0f, 35.0f, 36.0f
2182  };
2183 
2184  std::vector<float> input2 =
2185  {
2186  37.0f, 38.0f, 39.0f,
2187  40.0f, 41.0f, 42.0f,
2188  43.0f, 44.0f, 45.0f,
2189  46.0f, 47.0f, 48.0f,
2190  49.0f, 50.0f, 51.0f,
2191  52.0f, 53.0f, 54.0f,
2192  };
2193 
2194  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2195  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2196 
2197  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2198  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2199 
2200  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2201 
2202  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2203 
2204  std::unique_ptr<ITensorHandle> inputHandle1 =
2205  subTensorsSupported ?
2206  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2207  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2208 
2209  std::unique_ptr<ITensorHandle> inputHandle2 =
2210  subTensorsSupported ?
2211  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2212  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2213 
2214  ConcatQueueDescriptor data;
2215  WorkloadInfo info;
2216  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2217  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2218  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2219 
2220  data.m_ViewOrigins.push_back(window1);
2221  data.m_ViewOrigins.push_back(window2);
2222 
2223  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2224 
2225  inputHandle1->Allocate();
2226  inputHandle2->Allocate();
2227  outputHandle->Allocate();
2228 
2229  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2230  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2231 
2232  workload->PostAllocationConfigure();
2233  workload->Execute();
2234 
2235  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2236 
2237  return LayerTestResult<float, 3>(actualOutput,
2238  expectedOutput,
2239  outputHandle->GetShape(),
2240  outputTensorInfo.GetShape());
2241 }
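ConcatTest places its two views along dimension 0: wOrigin1 = { 0, 0, 0 } and wOrigin2 = { 2, 0, 0 }, so input2 becomes the third channel of the output. A small illustrative snippet (the Flatten helper is hypothetical, not an ArmNN API) showing how that view origin maps the first element of input2 into the concatenated output:

#include <array>
#include <cstddef>

// Row-major flattening of a 3D coordinate for a { channels, height, width } shape.
std::size_t Flatten(std::array<unsigned, 3> coord, std::array<unsigned, 3> shape)
{
    return (static_cast<std::size_t>(coord[0]) * shape[1] + coord[1]) * shape[2] + coord[2];
}

int main()
{
    const std::array<unsigned, 3> outputShape = { 3, 6, 3 }; // { outputChannels, outputHeight, outputWidth }
    const std::array<unsigned, 3> origin2     = { 2, 0, 0 }; // wOrigin2: input[1] starts at channel 2

    // input2 element (0, 0, 0) lands at output coordinate (2, 0, 0), i.e. flat index 36,
    // which is exactly where expectedOutput switches from 36.0f to 37.0f.
    return Flatten(origin2, outputShape) == 36 ? 0 : 1;
}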

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2713 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2717 {
2718  IgnoreUnused(memoryManager);
2719 
2720  unsigned int outputWidth = 3;
2721  unsigned int outputHeight = 6;
2722  unsigned int outputChannels = 3;
2723 
2724  unsigned int inputWidth1 = 3;
2725  unsigned int inputHeight1 = 6;
2726  unsigned int inputChannels1 = 2;
2727 
2728  unsigned int inputWidth2 = 3;
2729  unsigned int inputHeight2 = 6;
2730  unsigned int inputChannels2 = 1;
2731 
2732  // Defines the tensor descriptors.
2733  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2734  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2735  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2736 
2737  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2738  const float scale = 0.13497836f;
2739  const int32_t offset = -7;
2740 
2741  outputTensorInfo.SetQuantizationScale(scale);
2742  outputTensorInfo.SetQuantizationOffset(offset);
2743  inputTensorInfo1.SetQuantizationScale(scale);
2744  inputTensorInfo1.SetQuantizationOffset(offset);
2745  inputTensorInfo2.SetQuantizationScale(scale);
2746  inputTensorInfo2.SetQuantizationOffset(offset);
2747 
2748  std::vector<uint16_t> actualOutput(outputTensorInfo.GetNumElements());
2749 
2750  std::vector<uint16_t> expectedOutput =
2751  {
2752  1, 2, 3,
2753  4, 5, 6,
2754  7, 8, 9,
2755  10, 11, 12,
2756  13, 14, 15,
2757  16, 17, 18,
2758 
2759  19, 20, 21,
2760  22, 23, 24,
2761  25, 26, 27,
2762  28, 29, 30,
2763  31, 32, 33,
2764  34, 35, 36,
2765 
2766  37, 38, 39,
2767  40, 41, 42,
2768  43, 44, 45,
2769  46, 47, 48,
2770  49, 50, 51,
2771  52, 53, 54
2772  };
2773 
2774  std::vector<uint16_t> input1 =
2775  {
2776  1, 2, 3,
2777  4, 5, 6,
2778  7, 8, 9,
2779  10, 11, 12,
2780  13, 14, 15,
2781  16, 17, 18,
2782 
2783  19, 20, 21,
2784  22, 23, 24,
2785  25, 26, 27,
2786  28, 29, 30,
2787  31, 32, 33,
2788  34, 35, 36,
2789  };
2790 
2791  std::vector<uint16_t> input2 =
2792  {
2793  37, 38, 39,
2794  40, 41, 42,
2795  43, 44, 45,
2796  46, 47, 48,
2797  49, 50, 51,
2798  52, 53, 54,
2799  };
2800 
2801  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2802  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2803 
2804  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2805  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2806 
2807 
2808  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2809 
2810  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2811 
2812  std::unique_ptr<ITensorHandle> inputHandle1 =
2813  subTensorsSupported ?
2814  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2815  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2816 
2817  std::unique_ptr<ITensorHandle> inputHandle2 =
2818  subTensorsSupported ?
2819  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2820  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2821 
2822 
2823  ConcatQueueDescriptor data;
2824  WorkloadInfo info;
2825  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2826  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2827  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2828 
2829  data.m_ViewOrigins.push_back(window1);
2830  data.m_ViewOrigins.push_back(window2);
2831 
2832  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2833 
2834  inputHandle1->Allocate();
2835  inputHandle2->Allocate();
2836  outputHandle->Allocate();
2837 
2838  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2839  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2840 
2841  workload->PostAllocationConfigure();
2842  workload->Execute();
2843 
2844  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2845 
2846  return LayerTestResult<uint16_t, 3>(actualOutput,
2847  expectedOutput,
2848  outputHandle->GetShape(),
2849  outputTensorInfo.GetShape());
2850 }

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2429 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2433 {
2434  IgnoreUnused(memoryManager);
2435 
2436  unsigned int outputWidth = 3;
2437  unsigned int outputHeight = 6;
2438  unsigned int outputChannels = 3;
2439 
2440  unsigned int inputWidth1 = 3;
2441  unsigned int inputHeight1 = 6;
2442  unsigned int inputChannels1 = 2;
2443 
2444  unsigned int inputWidth2 = 3;
2445  unsigned int inputHeight2 = 6;
2446  unsigned int inputChannels2 = 1;
2447 
2448  // Defines the tensor descriptors.
2449  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2450  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2451  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2452 
2453  // Quantized input1 tensor. Range [-3, 1]
2454  const float inputScale1 = 0.015686f;
2455  const int32_t inputOffset1 = 192;
2456 
2457  std::vector<uint8_t> input1 =
2458  {
2459  1, 2, 3,
2460  4, 5, 6,
2461  7, 8, 9,
2462  10, 11, 12,
2463  13, 14, 15,
2464  16, 17, 18,
2465 
2466  19, 20, 21,
2467  22, 23, 24,
2468  25, 26, 27,
2469  28, 29, 30,
2470  31, 32, 33,
2471  34, 35, 36
2472  };
2473 
2474  // Quantized input2 tensor. Range [-1, 4]
2475  const float inputScale2 = 0.019608f;
2476  const int32_t inputOffset2 = 50;
2477 
2478  std::vector<uint8_t> input2 =
2479  {
2480  37, 38, 39,
2481  40, 41, 42,
2482  43, 44, 45,
2483  46, 47, 48,
2484  49, 50, 51,
2485  52, 53, 54
2486  };
2487 
2488  // Output has the same quantization parameters as input1,
2489  // so that only the requantization of input2 is required.
2490  const float outputScale = 0.015686f;
2491  const int32_t outputOffset = 192;
2492 
2493  std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
2494 
2495  std::vector<uint8_t> expectedOutput =
2496  {
2497  1, 2, 3,
2498  4, 5, 6,
2499  7, 8, 9,
2500  10, 11, 12,
2501  13, 14, 15,
2502  16, 17, 18,
2503 
2504  19, 20, 21,
2505  22, 23, 24,
2506  25, 26, 27,
2507  28, 29, 30,
2508  31, 32, 33,
2509  34, 35, 36,
2510 
2511  176, 177, 178,
2512  179, 181, 182,
2513  183, 184, 186,
2514  187, 188, 189,
2515  191, 192, 193,
2516  195, 196, 197
2517  };
2518 
2519  outputTensorInfo.SetQuantizationScale(outputScale);
2520  outputTensorInfo.SetQuantizationOffset(outputOffset);
2521  inputTensorInfo1.SetQuantizationScale(inputScale1);
2522  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2523  inputTensorInfo2.SetQuantizationScale(inputScale2);
2524  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2525 
2526  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2527  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2528 
2529  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2530  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2531 
2532  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2533 
2534  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2535 
2536  std::unique_ptr<ITensorHandle> inputHandle1 =
2537  subTensorsSupported ?
2538  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2539  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2540 
2541  std::unique_ptr<ITensorHandle> inputHandle2 =
2542  subTensorsSupported ?
2543  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2544  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2545 
2546  ConcatQueueDescriptor data;
2547  WorkloadInfo info;
2548  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2549  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2550  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2551 
2552  data.m_ViewOrigins.push_back(window1);
2553  data.m_ViewOrigins.push_back(window2);
2554 
2555  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2556 
2557  inputHandle1->Allocate();
2558  inputHandle2->Allocate();
2559  outputHandle->Allocate();
2560 
2561  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2562  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2563 
2564  workload->PostAllocationConfigure();
2565  workload->Execute();
2566 
2567  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2568 
2569  return LayerTestResult<uint8_t, 3>(actualOutput,
2570  expectedOutput,
2571  outputHandle->GetShape(),
2572  outputTensorInfo.GetShape());
2573 }
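As a worked check of the requantization above: the first element of input2 (quantized value 37, scale 0.019608, offset 50) dequantizes to 0.019608 * (37 - 50) ≈ -0.2549, which requantizes with the output parameters to round(-0.2549 / 0.015686) + 192 = 176, the first value of the third channel block in expectedOutput. The values coming from input1 pass through unchanged because the output reuses input1's quantization parameters.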

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 2575 of file ConcatTestImpl.cpp.

References armnn::Concat, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2579 {
2580  IgnoreUnused(memoryManager);
2581 
2582  unsigned int outputWidth = 3;
2583  unsigned int outputHeight = 6;
2584  unsigned int outputChannels = 3;
2585 
2586  unsigned int inputWidth1 = 3;
2587  unsigned int inputHeight1 = 6;
2588  unsigned int inputChannels1 = 2;
2589 
2590  unsigned int inputWidth2 = 3;
2591  unsigned int inputHeight2 = 6;
2592  unsigned int inputChannels2 = 1;
2593 
2594  // Defines the tensor descriptors.
2595  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2596  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2597  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2598 
2599  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2600  const float scale = 0.13497836f;
2601  const int32_t offset = -7;
2602 
2603  outputTensorInfo.SetQuantizationScale(scale);
2604  outputTensorInfo.SetQuantizationOffset(offset);
2605  inputTensorInfo1.SetQuantizationScale(scale);
2606  inputTensorInfo1.SetQuantizationOffset(offset);
2607  inputTensorInfo2.SetQuantizationScale(scale);
2608  inputTensorInfo2.SetQuantizationOffset(offset);
2609 
2610  std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
2611 
2612  std::vector<uint8_t> expectedOutput =
2613  {
2614  1, 2, 3,
2615  4, 5, 6,
2616  7, 8, 9,
2617  10, 11, 12,
2618  13, 14, 15,
2619  16, 17, 18,
2620 
2621  19, 20, 21,
2622  22, 23, 24,
2623  25, 26, 27,
2624  28, 29, 30,
2625  31, 32, 33,
2626  34, 35, 36,
2627 
2628  37, 38, 39,
2629  40, 41, 42,
2630  43, 44, 45,
2631  46, 47, 48,
2632  49, 50, 51,
2633  52, 53, 54
2634  };
2635 
2636  std::vector<uint8_t> input1 =
2637  {
2638  1, 2, 3,
2639  4, 5, 6,
2640  7, 8, 9,
2641  10, 11, 12,
2642  13, 14, 15,
2643  16, 17, 18,
2644 
2645  19, 20, 21,
2646  22, 23, 24,
2647  25, 26, 27,
2648  28, 29, 30,
2649  31, 32, 33,
2650  34, 35, 36
2651  };
2652 
2653  std::vector<uint8_t> input2 =
2654  {
2655  37, 38, 39,
2656  40, 41, 42,
2657  43, 44, 45,
2658  46, 47, 48,
2659  49, 50, 51,
2660  52, 53, 54
2661  };
2662 
2663  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2664  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2665 
2666  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2667  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2668 
2669  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2670 
2671  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2672 
2673  std::unique_ptr<ITensorHandle> inputHandle1 =
2674  subTensorsSupported ?
2675  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2676  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2677 
2678  std::unique_ptr<ITensorHandle> inputHandle2 =
2679  subTensorsSupported ?
2680  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2681  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2682 
2683 
2684  ConcatQueueDescriptor data;
2685  WorkloadInfo info;
2686  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2687  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2688  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2689 
2690  data.m_ViewOrigins.push_back(window1);
2691  data.m_ViewOrigins.push_back(window2);
2692 
2693  std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
2694 
2695  inputHandle1->Allocate();
2696  inputHandle2->Allocate();
2697  outputHandle->Allocate();
2698 
2699  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2700  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2701 
2702  workload->PostAllocationConfigure();
2703  workload->Execute();
2704 
2705  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2706 
2707  return LayerTestResult<uint8_t, 3>(actualOutput,
2708  expectedOutput,
2709  outputHandle->GetShape(),
2710  outputTensorInfo.GetShape());
2711 }