ArmNN
 20.02
ConcatTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::BFloat16, 3 > ConcatBFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2197 of file ConcatTestImpl.cpp.

2200 {
2201  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2202 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2770 of file ConcatTestImpl.cpp.

2773 {
2774  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2775 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2218 of file ConcatTestImpl.cpp.

2221 {
2222  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2223 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2791 of file ConcatTestImpl.cpp.

2794 {
2795  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2796  workloadFactory, memoryManager, 0.5f, -1);
2797 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2204 of file ConcatTestImpl.cpp.

2207 {
2208  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2209 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2777 of file ConcatTestImpl.cpp.

2780 {
2781  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2782 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2225 of file ConcatTestImpl.cpp.

2228 {
2229  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2230 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2799 of file ConcatTestImpl.cpp.

2802 {
2803  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2804  workloadFactory, memoryManager, 0.5f, -1);
2805 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2211 of file ConcatTestImpl.cpp.

2214 {
2215  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2216 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2784 of file ConcatTestImpl.cpp.

2787 {
2788  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2789 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2254 of file ConcatTestImpl.cpp.

2257 {
2258  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2259  workloadFactory, memoryManager, 0.0f, 0);
2260 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2830 of file ConcatTestImpl.cpp.

2833 {
2834  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2835 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2232 of file ConcatTestImpl.cpp.

2235 {
2236  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2237 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2807 of file ConcatTestImpl.cpp.

2810 {
2811  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2812 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2262 of file ConcatTestImpl.cpp.

2265 {
2266  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2267 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2837 of file ConcatTestImpl.cpp.

2840 {
2841  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2842  workloadFactory, memoryManager, 0.5f, -1);
2843 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2239 of file ConcatTestImpl.cpp.

2242 {
2243  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2244 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2814 of file ConcatTestImpl.cpp.

2817 {
2818  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2819 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2269 of file ConcatTestImpl.cpp.

2273 {
2274  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2275  workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2276 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2845 of file ConcatTestImpl.cpp.

2849 {
2850  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2851  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2852 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2246 of file ConcatTestImpl.cpp.

2250 {
2251  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2252 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2821 of file ConcatTestImpl.cpp.

2825 {
2826  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2827  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2828 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2307 of file ConcatTestImpl.cpp.

2310 {
2311  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2312 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2883 of file ConcatTestImpl.cpp.

2886 {
2887  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2888  workloadFactory, memoryManager, 0.5f, -1);
2889 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2314 of file ConcatTestImpl.cpp.

2317 {
2318  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2319  workloadFactory, memoryManager, 0.0f, 0);
2320 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2891 of file ConcatTestImpl.cpp.

2894 {
2895  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2896  workloadFactory, memoryManager, 0.5f, -1);
2897 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2322 of file ConcatTestImpl.cpp.

2325 {
2326  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2327 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2899 of file ConcatTestImpl.cpp.

2902 {
2903  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
2904  workloadFactory, memoryManager, 0.5f, -1);
2905 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2329 of file ConcatTestImpl.cpp.

2333 {
2334  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2335  workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2336 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2907 of file ConcatTestImpl.cpp.

2911 {
2912  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
2913  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2914 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2278 of file ConcatTestImpl.cpp.

2281 {
2282  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2283 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2854 of file ConcatTestImpl.cpp.

2857 {
2858  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2859 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2285 of file ConcatTestImpl.cpp.

2288 {
2289  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2290 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2861 of file ConcatTestImpl.cpp.

2864 {
2865  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2866 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2292 of file ConcatTestImpl.cpp.

2295 {
2296  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2297 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2868 of file ConcatTestImpl.cpp.

2871 {
2872  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2873 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2299 of file ConcatTestImpl.cpp.

2303 {
2304  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2305 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2875 of file ConcatTestImpl.cpp.

2878 {
2879  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2880  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2881 }

◆ ConcatBFloat16Test()

LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2345 of file ConcatTestImpl.cpp.

2348 {
2349  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
2350 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 1916 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and IWorkloadFactory::SupportsSubTensors().

1920 {
1921  IgnoreUnused(memoryManager);
1922 
1923  // Defines the tensor descriptors.
1924  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1925  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1926  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1927 
1928  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1929 
1930  // Quantized input1 tensor.
1931  const float inputScale1 = 0.5f;
1932  const int32_t inputOffset1 = 5;
1933 
1934  auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1935  {
1936  1, 2, 3,
1937  4, 5, 6,
1938  7, 8, 9,
1939  10, 11, 12,
1940  13, 14, 15,
1941  16, 17, 18,
1942 
1943  19, 20, 21,
1944  22, 23, 24,
1945  25, 26, 27,
1946  28, 29, 30,
1947  31, 32, 33,
1948  34, 35, 36
1949  }));
1950 
1951  // Quantized input2 tensor.
1952  const float inputScale2 = 0.2f;
1953  const int32_t inputOffset2 = 10;
1954 
1955  auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1956  {
1957  37, 38, 39,
1958  40, 41, 42,
1959  43, 44, 45,
1960  46, 47, 48,
1961  49, 50, 51,
1962  52, 53, 54
1963  }));
1964 
1965  // Quantized output tensor.
1966  const float outputScale = 0.1f;
1967  const int32_t outputOffset = 20;
1968 
1969  LayerTestResult<T, 3> ret(outputTensorInfo);
1970 
1971  ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1972  {
1973  0, 5, 74,
1974  10, 15, 76,
1975  20, 25, 78,
1976  30, 35, 80,
1977  40, 45, 82,
1978  50, 55, 84,
1979 
1980  60, 65, 86,
1981  70, 75, 88,
1982  80, 85, 90,
1983  90, 95, 92,
1984  100, 105, 94,
1985  110, 115, 96,
1986 
1987  120, 125, 98,
1988  130, 135, 100,
1989  140, 145, 102,
1990  150, 155, 104,
1991  160, 165, 106,
1992  170, 175, 108
1993  }));
1994 
1995  outputTensorInfo.SetQuantizationScale(outputScale);
1996  outputTensorInfo.SetQuantizationOffset(outputOffset);
1997  inputTensorInfo1.SetQuantizationScale(inputScale1);
1998  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
1999  inputTensorInfo2.SetQuantizationScale(inputScale2);
2000  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2001 
2002  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2003  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2004 
2005  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2006  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2007 
2008  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2009 
2010  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2011 
2012  std::unique_ptr<ITensorHandle> inputHandle1 =
2013  subTensorsSupported ?
2014  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2015  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2016 
2017  std::unique_ptr<ITensorHandle> inputHandle2 =
2018  subTensorsSupported ?
2019  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2020  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2021 
2022  ConcatQueueDescriptor data;
2023  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2024  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2025  data.m_Parameters = desc;
2026 
2027  WorkloadInfo info;
2028  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2029  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2030  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2031 
2032  data.m_ViewOrigins.push_back(window1);
2033  data.m_ViewOrigins.push_back(window2);
2034 
2035  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2036 
2037  inputHandle1->Allocate();
2038  inputHandle2->Allocate();
2039  outputHandle->Allocate();
2040 
2041  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2042  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2043 
2044  workload->PostAllocationConfigure();
2045  workload->Execute();
2046 
2047  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2048 
2049  return ret;
2050 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
An OriginsDescriptor for the ConcatLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2338 of file ConcatTestImpl.cpp.

2341 {
2342  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
2343 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2072 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, and IWorkloadFactory::SupportsSubTensors().

2075 {
2076  IgnoreUnused(memoryManager);
2077 
2078  unsigned int outputWidth = 3;
2079  unsigned int outputHeight = 6;
2080  unsigned int outputChannels = 3;
2081 
2082  unsigned int inputWidth1 = 3;
2083  unsigned int inputHeight1 = 6;
2084  unsigned int inputChannels1 = 2;
2085 
2086  unsigned int inputWidth2 = 3;
2087  unsigned int inputHeight2 = 6;
2088  unsigned int inputChannels2 = 1;
2089 
2090  // Define the tensor descriptors.
2091  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2092  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2093  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2094 
2095  LayerTestResult<float,3> ret(outputTensorInfo);
2096 
2097  ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2098  {
2099  1.0f, 2.0f, 3.0f,
2100  4.0f, 5.0f, 6.0f,
2101  7.0f, 8.0f, 9.0f,
2102  10.0f, 11.0f, 12.0f,
2103  13.0f, 14.0f, 15.0f,
2104  16.0f, 17.0f, 18.0f,
2105 
2106  19.0f, 20.0f, 21.0f,
2107  22.0f, 23.0f, 24.0f,
2108  25.0f, 26.0f, 27.0f,
2109  28.0f, 29.0f, 30.0f,
2110  31.0f, 32.0f, 33.0f,
2111  34.0f, 35.0f, 36.0f,
2112 
2113  37.0f, 38.0f, 39.0f,
2114  40.0f, 41.0f, 42.0f,
2115  43.0f, 44.0f, 45.0f,
2116  46.0f, 47.0f, 48.0f,
2117  49.0f, 50.0f, 51.0f,
2118  52.0f, 53.0f, 54.0f,
2119  })
2120  );
2121 
2122  auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2123  {
2124  1.0f, 2.0f, 3.0f,
2125  4.0f, 5.0f, 6.0f,
2126  7.0f, 8.0f, 9.0f,
2127  10.0f, 11.0f, 12.0f,
2128  13.0f, 14.0f, 15.0f,
2129  16.0f, 17.0f, 18.0f,
2130 
2131  19.0f, 20.0f, 21.0f,
2132  22.0f, 23.0f, 24.0f,
2133  25.0f, 26.0f, 27.0f,
2134  28.0f, 29.0f, 30.0f,
2135  31.0f, 32.0f, 33.0f,
2136  34.0f, 35.0f, 36.0f,
2137  })
2138  );
2139 
2140  auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2141  {
2142  37.0f, 38.0f, 39.0f,
2143  40.0f, 41.0f, 42.0f,
2144  43.0f, 44.0f, 45.0f,
2145  46.0f, 47.0f, 48.0f,
2146  49.0f, 50.0f, 51.0f,
2147  52.0f, 53.0f, 54.0f,
2148  })
2149  );
2150 
2151  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2152  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2153 
2154  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2155  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2156 
2157  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2158 
2159  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2160 
2161  std::unique_ptr<ITensorHandle> inputHandle1 =
2162  subTensorsSupported ?
2163  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2164  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2165 
2166  std::unique_ptr<ITensorHandle> inputHandle2 =
2167  subTensorsSupported ?
2168  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2169  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2170 
2171  ConcatQueueDescriptor data;
2172  WorkloadInfo info;
2173  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2174  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2175  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2176 
2177  data.m_ViewOrigins.push_back(window1);
2178  data.m_ViewOrigins.push_back(window2);
2179 
2180  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2181 
2182  inputHandle1->Allocate();
2183  inputHandle2->Allocate();
2184  outputHandle->Allocate();
2185 
2186  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2187  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2188 
2189  workload->PostAllocationConfigure();
2190  workload->Execute();
2191 
2192  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2193 
2194  return ret;
2195 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2635 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2638 {
2639  IgnoreUnused(memoryManager);
2640 
2641  unsigned int outputWidth = 3;
2642  unsigned int outputHeight = 6;
2643  unsigned int outputChannels = 3;
2644 
2645  unsigned int inputWidth1 = 3;
2646  unsigned int inputHeight1 = 6;
2647  unsigned int inputChannels1 = 2;
2648 
2649  unsigned int inputWidth2 = 3;
2650  unsigned int inputHeight2 = 6;
2651  unsigned int inputChannels2 = 1;
2652 
2653  // Defines the tensor descriptors.
2654  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2655  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2656  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2657 
2658  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2659  const float scale = 0.13497836f;
2660  const int32_t offset = -7;
2661 
2662  outputTensorInfo.SetQuantizationScale(scale);
2663  outputTensorInfo.SetQuantizationOffset(offset);
2664  inputTensorInfo1.SetQuantizationScale(scale);
2665  inputTensorInfo1.SetQuantizationOffset(offset);
2666  inputTensorInfo2.SetQuantizationScale(scale);
2667  inputTensorInfo2.SetQuantizationOffset(offset);
2668 
2669  LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2670 
2671  ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2672  {
2673  1, 2, 3,
2674  4, 5, 6,
2675  7, 8, 9,
2676  10, 11, 12,
2677  13, 14, 15,
2678  16, 17, 18,
2679 
2680  19, 20, 21,
2681  22, 23, 24,
2682  25, 26, 27,
2683  28, 29, 30,
2684  31, 32, 33,
2685  34, 35, 36,
2686 
2687  37, 38, 39,
2688  40, 41, 42,
2689  43, 44, 45,
2690  46, 47, 48,
2691  49, 50, 51,
2692  52, 53, 54,
2693  }));
2694 
2695  auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2696  {
2697  1, 2, 3,
2698  4, 5, 6,
2699  7, 8, 9,
2700  10, 11, 12,
2701  13, 14, 15,
2702  16, 17, 18,
2703 
2704  19, 20, 21,
2705  22, 23, 24,
2706  25, 26, 27,
2707  28, 29, 30,
2708  31, 32, 33,
2709  34, 35, 36,
2710  }));
2711 
2712  auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2713  {
2714  37, 38, 39,
2715  40, 41, 42,
2716  43, 44, 45,
2717  46, 47, 48,
2718  49, 50, 51,
2719  52, 53, 54,
2720  }));
2721 
2722  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2723  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2724 
2725  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2726  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2727 
2728 
2729  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2730 
2731  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2732 
2733  std::unique_ptr<ITensorHandle> inputHandle1 =
2734  subTensorsSupported ?
2735  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2736  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2737 
2738  std::unique_ptr<ITensorHandle> inputHandle2 =
2739  subTensorsSupported ?
2740  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2741  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2742 
2743 
2744  ConcatQueueDescriptor data;
2745  WorkloadInfo info;
2746  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2747  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2748  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2749 
2750  data.m_ViewOrigins.push_back(window1);
2751  data.m_ViewOrigins.push_back(window2);
2752 
2753  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2754 
2755  inputHandle1->Allocate();
2756  inputHandle2->Allocate();
2757  outputHandle->Allocate();
2758 
2759  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2760  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2761 
2762  workload->PostAllocationConfigure();
2763  workload->Execute();
2764 
2765  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2766 
2767  return ret;
2768 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:259
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2352 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, and IWorkloadFactory::SupportsSubTensors().

2355 {
2356  IgnoreUnused(memoryManager);
2357 
2358  unsigned int outputWidth = 3;
2359  unsigned int outputHeight = 6;
2360  unsigned int outputChannels = 3;
2361 
2362  unsigned int inputWidth1 = 3;
2363  unsigned int inputHeight1 = 6;
2364  unsigned int inputChannels1 = 2;
2365 
2366  unsigned int inputWidth2 = 3;
2367  unsigned int inputHeight2 = 6;
2368  unsigned int inputChannels2 = 1;
2369 
2370  // Defines the tensor descriptors.
2371  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2372  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2373  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2374 
2375  // Quantized input1 tensor. Range [-3, 1]
2376  const float inputScale1 = 0.015686f;
2377  const int32_t inputOffset1 = 192;
2378 
2379  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2380  {
2381  1, 2, 3,
2382  4, 5, 6,
2383  7, 8, 9,
2384  10, 11, 12,
2385  13, 14, 15,
2386  16, 17, 18,
2387 
2388  19, 20, 21,
2389  22, 23, 24,
2390  25, 26, 27,
2391  28, 29, 30,
2392  31, 32, 33,
2393  34, 35, 36,
2394  })
2395  );
2396 
2397  // Quantized input2 tensor. Range [-1, 4]
2398  const float inputScale2 = 0.019608f;
2399  const int32_t inputOffset2 = 50;
2400 
2401  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2402  {
2403  37, 38, 39,
2404  40, 41, 42,
2405  43, 44, 45,
2406  46, 47, 48,
2407  49, 50, 51,
2408  52, 53, 54,
2409  })
2410  );
2411 
2412  // Output has the same quantization parameters as input1,
2413  // so that only the requantization of input2 is required
2414  const float outputScale = 0.015686f;
2415  const int32_t outputOffset = 192;
2416 
2417  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2418 
2419  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2420  {
2421  1, 2, 3,
2422  4, 5, 6,
2423  7, 8, 9,
2424  10, 11, 12,
2425  13, 14, 15,
2426  16, 17, 18,
2427 
2428  19, 20, 21,
2429  22, 23, 24,
2430  25, 26, 27,
2431  28, 29, 30,
2432  31, 32, 33,
2433  34, 35, 36,
2434 
2435  176, 177, 178,
2436  179, 181, 182,
2437  183, 184, 186,
2438  187, 188, 189,
2439  191, 192, 193,
2440  195, 196, 197,
2441  })
2442  );
2443 
2444  outputTensorInfo.SetQuantizationScale(outputScale);
2445  outputTensorInfo.SetQuantizationOffset(outputOffset);
2446  inputTensorInfo1.SetQuantizationScale(inputScale1);
2447  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2448  inputTensorInfo2.SetQuantizationScale(inputScale2);
2449  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2450 
2451  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2452  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2453 
2454  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2455  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2456 
2457  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2458 
2459  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2460 
2461  std::unique_ptr<ITensorHandle> inputHandle1 =
2462  subTensorsSupported ?
2463  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2464  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2465 
2466  std::unique_ptr<ITensorHandle> inputHandle2 =
2467  subTensorsSupported ?
2468  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2469  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2470 
2471  ConcatQueueDescriptor data;
2472  WorkloadInfo info;
2473  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2474  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2475  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2476 
2477  data.m_ViewOrigins.push_back(window1);
2478  data.m_ViewOrigins.push_back(window2);
2479 
2480  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2481 
2482  inputHandle1->Allocate();
2483  inputHandle2->Allocate();
2484  outputHandle->Allocate();
2485 
2486  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2487  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2488 
2489  workload->PostAllocationConfigure();
2490  workload->Execute();
2491 
2492  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2493 
2494  return ret;
2495 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2497 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2500 {
2501  IgnoreUnused(memoryManager);
2502 
2503  unsigned int outputWidth = 3;
2504  unsigned int outputHeight = 6;
2505  unsigned int outputChannels = 3;
2506 
2507  unsigned int inputWidth1 = 3;
2508  unsigned int inputHeight1 = 6;
2509  unsigned int inputChannels1 = 2;
2510 
2511  unsigned int inputWidth2 = 3;
2512  unsigned int inputHeight2 = 6;
2513  unsigned int inputChannels2 = 1;
2514 
2515  // Defines the tensor descriptors.
2516  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2517  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2518  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2519 
2520  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2521  const float scale = 0.13497836f;
2522  const int32_t offset = -7;
2523 
2524  outputTensorInfo.SetQuantizationScale(scale);
2525  outputTensorInfo.SetQuantizationOffset(offset);
2526  inputTensorInfo1.SetQuantizationScale(scale);
2527  inputTensorInfo1.SetQuantizationOffset(offset);
2528  inputTensorInfo2.SetQuantizationScale(scale);
2529  inputTensorInfo2.SetQuantizationOffset(offset);
2530 
2531  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2532 
2533  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2534  {
2535  1, 2, 3,
2536  4, 5, 6,
2537  7, 8, 9,
2538  10, 11, 12,
2539  13, 14, 15,
2540  16, 17, 18,
2541 
2542  19, 20, 21,
2543  22, 23, 24,
2544  25, 26, 27,
2545  28, 29, 30,
2546  31, 32, 33,
2547  34, 35, 36,
2548 
2549  37, 38, 39,
2550  40, 41, 42,
2551  43, 44, 45,
2552  46, 47, 48,
2553  49, 50, 51,
2554  52, 53, 54,
2555  })
2556  );
2557 
2558  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2559  {
2560  1, 2, 3,
2561  4, 5, 6,
2562  7, 8, 9,
2563  10, 11, 12,
2564  13, 14, 15,
2565  16, 17, 18,
2566 
2567  19, 20, 21,
2568  22, 23, 24,
2569  25, 26, 27,
2570  28, 29, 30,
2571  31, 32, 33,
2572  34, 35, 36,
2573  })
2574  );
2575 
2576  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2577  {
2578  37, 38, 39,
2579  40, 41, 42,
2580  43, 44, 45,
2581  46, 47, 48,
2582  49, 50, 51,
2583  52, 53, 54,
2584  })
2585  );
2586 
2587  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2588  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2589 
2590  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2591  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2592 
2593 
2594  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2595 
2596  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2597 
2598  std::unique_ptr<ITensorHandle> inputHandle1 =
2599  subTensorsSupported ?
2600  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2601  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2602 
2603  std::unique_ptr<ITensorHandle> inputHandle2 =
2604  subTensorsSupported ?
2605  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2606  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2607 
2608 
2609  ConcatQueueDescriptor data;
2610  WorkloadInfo info;
2611  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2612  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2613  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2614 
2615  data.m_ViewOrigins.push_back(window1);
2616  data.m_ViewOrigins.push_back(window2);
2617 
2618  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2619 
2620  inputHandle1->Allocate();
2621  inputHandle2->Allocate();
2622  outputHandle->Allocate();
2623 
2624  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2625  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2626 
2627  workload->PostAllocationConfigure();
2628  workload->Execute();
2629 
2630  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2631 
2632  return ret;
2633 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:259
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0