ArmNN  NotReleased
ConcatTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2197 of file ConcatTestImpl.cpp.

2200 {
2201  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2202 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2763 of file ConcatTestImpl.cpp.

2766 {
2767  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2768 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2218 of file ConcatTestImpl.cpp.

2221 {
2222  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2223 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2784 of file ConcatTestImpl.cpp.

2787 {
2788  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2789  workloadFactory, memoryManager, 0.5f, -1);
2790 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2204 of file ConcatTestImpl.cpp.

2207 {
2208  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2209 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2770 of file ConcatTestImpl.cpp.

2773 {
2774  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2775 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2225 of file ConcatTestImpl.cpp.

2228 {
2229  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2230 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2792 of file ConcatTestImpl.cpp.

2795 {
2796  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2797  workloadFactory, memoryManager, 0.5f, -1);
2798 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2211 of file ConcatTestImpl.cpp.

2214 {
2215  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2216 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2777 of file ConcatTestImpl.cpp.

2780 {
2781  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2782 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2254 of file ConcatTestImpl.cpp.

2257 {
2258  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2259  workloadFactory, memoryManager, 0.0f, 0);
2260 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2823 of file ConcatTestImpl.cpp.

2826 {
2827  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2828 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2232 of file ConcatTestImpl.cpp.

2235 {
2236  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2237 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2800 of file ConcatTestImpl.cpp.

2803 {
2804  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2805 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2262 of file ConcatTestImpl.cpp.

2265 {
2266  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2267 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2830 of file ConcatTestImpl.cpp.

2833 {
2834  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2835  workloadFactory, memoryManager, 0.5f, -1);
2836 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2239 of file ConcatTestImpl.cpp.

2242 {
2243  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2244 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2807 of file ConcatTestImpl.cpp.

2810 {
2811  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2812 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2269 of file ConcatTestImpl.cpp.

2273 {
2274  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2275  workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2276 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2838 of file ConcatTestImpl.cpp.

2842 {
2843  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2844  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2845 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2246 of file ConcatTestImpl.cpp.

2250 {
2251  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2252 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2814 of file ConcatTestImpl.cpp.

2818 {
2819  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2820  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2821 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2307 of file ConcatTestImpl.cpp.

2310 {
2311  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2312 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2876 of file ConcatTestImpl.cpp.

2879 {
2880  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2881  workloadFactory, memoryManager, 0.5f, -1);
2882 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2314 of file ConcatTestImpl.cpp.

2317 {
2318  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2319  workloadFactory, memoryManager, 0.0f, 0);
2320 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2884 of file ConcatTestImpl.cpp.

2887 {
2888  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2889  workloadFactory, memoryManager, 0.5f, -1);
2890 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2322 of file ConcatTestImpl.cpp.

2325 {
2326  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2327 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2892 of file ConcatTestImpl.cpp.

2895 {
2896  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
2897  workloadFactory, memoryManager, 0.5f, -1);
2898 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2329 of file ConcatTestImpl.cpp.

2333 {
2334  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2335  workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2336 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2900 of file ConcatTestImpl.cpp.

2904 {
2905  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
2906  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2907 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2278 of file ConcatTestImpl.cpp.

2281 {
2282  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2283 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2847 of file ConcatTestImpl.cpp.

2850 {
2851  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2852 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2285 of file ConcatTestImpl.cpp.

2288 {
2289  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2290 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2854 of file ConcatTestImpl.cpp.

2857 {
2858  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2859 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2292 of file ConcatTestImpl.cpp.

2295 {
2296  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2297 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2861 of file ConcatTestImpl.cpp.

2864 {
2865  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2866 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2299 of file ConcatTestImpl.cpp.

2303 {
2304  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2305 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2868 of file ConcatTestImpl.cpp.

2871 {
2872  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2873  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2874 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 1916 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and IWorkloadFactory::SupportsSubTensors().

1920 {
1921  boost::ignore_unused(memoryManager);
1922 
1923  // Defines the tensor descriptors.
1924  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1925  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1926  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1927 
1928  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1929 
1930  // Quantized input1 tensor.
1931  const float inputScale1 = 0.5f;
1932  const int32_t inputOffset1 = 5;
1933 
1934  auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1935  {
1936  1, 2, 3,
1937  4, 5, 6,
1938  7, 8, 9,
1939  10, 11, 12,
1940  13, 14, 15,
1941  16, 17, 18,
1942 
1943  19, 20, 21,
1944  22, 23, 24,
1945  25, 26, 27,
1946  28, 29, 30,
1947  31, 32, 33,
1948  34, 35, 36
1949  }));
1950 
1951  // Quantized input2 tensor.
1952  const float inputScale2 = 0.2f;
1953  const int32_t inputOffset2 = 10;
1954 
1955  auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1956  {
1957  37, 38, 39,
1958  40, 41, 42,
1959  43, 44, 45,
1960  46, 47, 48,
1961  49, 50, 51,
1962  52, 53, 54
1963  }));
1964 
1965  // Quantized output tensor.
1966  const float outputScale = 0.1f;
1967  const int32_t outputOffset = 20;
1968 
1969  LayerTestResult<T, 3> ret(outputTensorInfo);
1970 
1971  ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1972  {
1973  0, 5, 74,
1974  10, 15, 76,
1975  20, 25, 78,
1976  30, 35, 80,
1977  40, 45, 82,
1978  50, 55, 84,
1979 
1980  60, 65, 86,
1981  70, 75, 88,
1982  80, 85, 90,
1983  90, 95, 92,
1984  100, 105, 94,
1985  110, 115, 96,
1986 
1987  120, 125, 98,
1988  130, 135, 100,
1989  140, 145, 102,
1990  150, 155, 104,
1991  160, 165, 106,
1992  170, 175, 108
1993  }));
1994 
1995  outputTensorInfo.SetQuantizationScale(outputScale);
1996  outputTensorInfo.SetQuantizationOffset(outputOffset);
1997  inputTensorInfo1.SetQuantizationScale(inputScale1);
1998  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
1999  inputTensorInfo2.SetQuantizationScale(inputScale2);
2000  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2001 
2002  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2003  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2004 
2005  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2006  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2007 
2008  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2009 
2010  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2011 
2012  std::unique_ptr<ITensorHandle> inputHandle1 =
2013  subTensorsSupported ?
2014  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2015  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2016 
2017  std::unique_ptr<ITensorHandle> inputHandle2 =
2018  subTensorsSupported ?
2019  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2020  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2021 
2022  ConcatQueueDescriptor data;
2023  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2024  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2026 
2027  WorkloadInfo info;
2028  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2029  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2030  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2031 
2032  data.m_ViewOrigins.push_back(window1);
2033  data.m_ViewOrigins.push_back(window2);
2034 
2035  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2036 
2037  inputHandle1->Allocate();
2038  inputHandle2->Allocate();
2039  outputHandle->Allocate();
2040 
2041  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2042  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2043 
2044  workload->PostAllocationConfigure();
2045  workload->Execute();
2046 
2047  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2048 
2049  return ret;
2050 }
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< ViewOrigin > m_ViewOrigins
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual bool SupportsSubTensors() const =0
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process...

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2338 of file ConcatTestImpl.cpp.

2341 {
2342  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
2343 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2072 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::info, ConcatQueueDescriptor::m_ViewOrigins, and IWorkloadFactory::SupportsSubTensors().

2075 {
2076  boost::ignore_unused(memoryManager);
2077 
2078  unsigned int outputWidth = 3;
2079  unsigned int outputHeight = 6;
2080  unsigned int outputChannels = 3;
2081 
2082  unsigned int inputWidth1 = 3;
2083  unsigned int inputHeight1 = 6;
2084  unsigned int inputChannels1 = 2;
2085 
2086  unsigned int inputWidth2 = 3;
2087  unsigned int inputHeight2 = 6;
2088  unsigned int inputChannels2 = 1;
2089 
2090  // Define the tensor descriptors.
2091  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2092  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2093  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2094 
2095  LayerTestResult<float,3> ret(outputTensorInfo);
2096 
2097  ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2098  {
2099  1.0f, 2.0f, 3.0f,
2100  4.0f, 5.0f, 6.0f,
2101  7.0f, 8.0f, 9.0f,
2102  10.0f, 11.0f, 12.0f,
2103  13.0f, 14.0f, 15.0f,
2104  16.0f, 17.0f, 18.0f,
2105 
2106  19.0f, 20.0f, 21.0f,
2107  22.0f, 23.0f, 24.0f,
2108  25.0f, 26.0f, 27.0f,
2109  28.0f, 29.0f, 30.0f,
2110  31.0f, 32.0f, 33.0f,
2111  34.0f, 35.0f, 36.0f,
2112 
2113  37.0f, 38.0f, 39.0f,
2114  40.0f, 41.0f, 42.0f,
2115  43.0f, 44.0f, 45.0f,
2116  46.0f, 47.0f, 48.0f,
2117  49.0f, 50.0f, 51.0f,
2118  52.0f, 53.0f, 54.0f,
2119  })
2120  );
2121 
2122  auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2123  {
2124  1.0f, 2.0f, 3.0f,
2125  4.0f, 5.0f, 6.0f,
2126  7.0f, 8.0f, 9.0f,
2127  10.0f, 11.0f, 12.0f,
2128  13.0f, 14.0f, 15.0f,
2129  16.0f, 17.0f, 18.0f,
2130 
2131  19.0f, 20.0f, 21.0f,
2132  22.0f, 23.0f, 24.0f,
2133  25.0f, 26.0f, 27.0f,
2134  28.0f, 29.0f, 30.0f,
2135  31.0f, 32.0f, 33.0f,
2136  34.0f, 35.0f, 36.0f,
2137  })
2138  );
2139 
2140  auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2141  {
2142  37.0f, 38.0f, 39.0f,
2143  40.0f, 41.0f, 42.0f,
2144  43.0f, 44.0f, 45.0f,
2145  46.0f, 47.0f, 48.0f,
2146  49.0f, 50.0f, 51.0f,
2147  52.0f, 53.0f, 54.0f,
2148  })
2149  );
2150 
2151  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2152  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2153 
2154  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2155  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2156 
2157  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2158 
2159  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2160 
2161  std::unique_ptr<ITensorHandle> inputHandle1 =
2162  subTensorsSupported ?
2163  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2164  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2165 
2166  std::unique_ptr<ITensorHandle> inputHandle2 =
2167  subTensorsSupported ?
2168  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2169  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2170 
2171  ConcatQueueDescriptor data;
2172  WorkloadInfo info;
2173  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2174  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2175  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2176 
2177  data.m_ViewOrigins.push_back(window1);
2178  data.m_ViewOrigins.push_back(window2);
2179 
2180  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2181 
2182  inputHandle1->Allocate();
2183  inputHandle2->Allocate();
2184  outputHandle->Allocate();
2185 
2186  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2187  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2188 
2189  workload->PostAllocationConfigure();
2190  workload->Execute();
2191 
2192  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2193 
2194  return ret;
2195 }
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< ViewOrigin > m_ViewOrigins
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual bool SupportsSubTensors() const =0

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2628 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2631 {
2632  boost::ignore_unused(memoryManager);
2633 
2634  unsigned int outputWidth = 3;
2635  unsigned int outputHeight = 6;
2636  unsigned int outputChannels = 3;
2637 
2638  unsigned int inputWidth1 = 3;
2639  unsigned int inputHeight1 = 6;
2640  unsigned int inputChannels1 = 2;
2641 
2642  unsigned int inputWidth2 = 3;
2643  unsigned int inputHeight2 = 6;
2644  unsigned int inputChannels2 = 1;
2645 
2646  // Defines the tensor descriptors.
2647  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2648  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2649  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2650 
2651  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2652  const float scale = 0.13497836f;
2653  const int32_t offset = -7;
2654 
2655  outputTensorInfo.SetQuantizationScale(scale);
2656  outputTensorInfo.SetQuantizationOffset(offset);
2657  inputTensorInfo1.SetQuantizationScale(scale);
2658  inputTensorInfo1.SetQuantizationOffset(offset);
2659  inputTensorInfo2.SetQuantizationScale(scale);
2660  inputTensorInfo2.SetQuantizationOffset(offset);
2661 
2662  LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2663 
2664  ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2665  {
2666  1, 2, 3,
2667  4, 5, 6,
2668  7, 8, 9,
2669  10, 11, 12,
2670  13, 14, 15,
2671  16, 17, 18,
2672 
2673  19, 20, 21,
2674  22, 23, 24,
2675  25, 26, 27,
2676  28, 29, 30,
2677  31, 32, 33,
2678  34, 35, 36,
2679 
2680  37, 38, 39,
2681  40, 41, 42,
2682  43, 44, 45,
2683  46, 47, 48,
2684  49, 50, 51,
2685  52, 53, 54,
2686  }));
2687 
2688  auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2689  {
2690  1, 2, 3,
2691  4, 5, 6,
2692  7, 8, 9,
2693  10, 11, 12,
2694  13, 14, 15,
2695  16, 17, 18,
2696 
2697  19, 20, 21,
2698  22, 23, 24,
2699  25, 26, 27,
2700  28, 29, 30,
2701  31, 32, 33,
2702  34, 35, 36,
2703  }));
2704 
2705  auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2706  {
2707  37, 38, 39,
2708  40, 41, 42,
2709  43, 44, 45,
2710  46, 47, 48,
2711  49, 50, 51,
2712  52, 53, 54,
2713  }));
2714 
2715  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2716  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2717 
2718  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2719  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2720 
2721 
2722  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2723 
2724  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2725 
2726  std::unique_ptr<ITensorHandle> inputHandle1 =
2727  subTensorsSupported ?
2728  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2729  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2730 
2731  std::unique_ptr<ITensorHandle> inputHandle2 =
2732  subTensorsSupported ?
2733  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2734  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2735 
2736 
2737  ConcatQueueDescriptor data;
2738  WorkloadInfo info;
2739  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2740  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2741  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2742 
2743  data.m_ViewOrigins.push_back(window1);
2744  data.m_ViewOrigins.push_back(window2);
2745 
2746  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2747 
2748  inputHandle1->Allocate();
2749  inputHandle2->Allocate();
2750  outputHandle->Allocate();
2751 
2752  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2753  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2754 
2755  workload->PostAllocationConfigure();
2756  workload->Execute();
2757 
2758  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2759 
2760  return ret;
2761 }
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< ViewOrigin > m_ViewOrigins
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual bool SupportsSubTensors() const =0
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:259

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2345 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, and IWorkloadFactory::SupportsSubTensors().

2348 {
2349  boost::ignore_unused(memoryManager);
2350 
2351  unsigned int outputWidth = 3;
2352  unsigned int outputHeight = 6;
2353  unsigned int outputChannels = 3;
2354 
2355  unsigned int inputWidth1 = 3;
2356  unsigned int inputHeight1 = 6;
2357  unsigned int inputChannels1 = 2;
2358 
2359  unsigned int inputWidth2 = 3;
2360  unsigned int inputHeight2 = 6;
2361  unsigned int inputChannels2 = 1;
2362 
2363  // Defines the tensor descriptors.
2364  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2365  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2366  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2367 
2368  // Quantized input1 tensor. Range [-3, 1]
2369  const float inputScale1 = 0.015686f;
2370  const int32_t inputOffset1 = 192;
2371 
2372  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2373  {
2374  1, 2, 3,
2375  4, 5, 6,
2376  7, 8, 9,
2377  10, 11, 12,
2378  13, 14, 15,
2379  16, 17, 18,
2380 
2381  19, 20, 21,
2382  22, 23, 24,
2383  25, 26, 27,
2384  28, 29, 30,
2385  31, 32, 33,
2386  34, 35, 36,
2387  })
2388  );
2389 
2390  // Quantized input2 tensor. Range [-1, 4]
2391  const float inputScale2 = 0.019608f;
2392  const int32_t inputOffset2 = 50;
2393 
2394  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2395  {
2396  37, 38, 39,
2397  40, 41, 42,
2398  43, 44, 45,
2399  46, 47, 48,
2400  49, 50, 51,
2401  52, 53, 54,
2402  })
2403  );
2404 
2405  // Output has the same quantization parameters as input1,
2406  // so that only the requantization of input2 is required
2407  const float outputScale = 0.015686f;
2408  const int32_t outputOffset = 192;
2409 
2410  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2411 
2412  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2413  {
2414  1, 2, 3,
2415  4, 5, 6,
2416  7, 8, 9,
2417  10, 11, 12,
2418  13, 14, 15,
2419  16, 17, 18,
2420 
2421  19, 20, 21,
2422  22, 23, 24,
2423  25, 26, 27,
2424  28, 29, 30,
2425  31, 32, 33,
2426  34, 35, 36,
2427 
2428  176, 177, 178,
2429  179, 181, 182,
2430  183, 184, 186,
2431  187, 188, 189,
2432  191, 192, 193,
2433  195, 196, 197,
2434  })
2435  );
2436 
2437  outputTensorInfo.SetQuantizationScale(outputScale);
2438  outputTensorInfo.SetQuantizationOffset(outputOffset);
2439  inputTensorInfo1.SetQuantizationScale(inputScale1);
2440  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2441  inputTensorInfo2.SetQuantizationScale(inputScale2);
2442  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2443 
2444  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2445  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2446 
2447  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2448  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2449 
2450  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2451 
2452  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2453 
2454  std::unique_ptr<ITensorHandle> inputHandle1 =
2455  subTensorsSupported ?
2456  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2457  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2458 
2459  std::unique_ptr<ITensorHandle> inputHandle2 =
2460  subTensorsSupported ?
2461  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2462  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2463 
2464  ConcatQueueDescriptor data;
2465  WorkloadInfo info;
2466  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2467  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2468  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2469 
2470  data.m_ViewOrigins.push_back(window1);
2471  data.m_ViewOrigins.push_back(window2);
2472 
2473  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2474 
2475  inputHandle1->Allocate();
2476  inputHandle2->Allocate();
2477  outputHandle->Allocate();
2478 
2479  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2480  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2481 
2482  workload->PostAllocationConfigure();
2483  workload->Execute();
2484 
2485  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2486 
2487  return ret;
2488 }
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< ViewOrigin > m_ViewOrigins
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2490 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2493 {
2494  boost::ignore_unused(memoryManager);
2495 
2496  unsigned int outputWidth = 3;
2497  unsigned int outputHeight = 6;
2498  unsigned int outputChannels = 3;
2499 
2500  unsigned int inputWidth1 = 3;
2501  unsigned int inputHeight1 = 6;
2502  unsigned int inputChannels1 = 2;
2503 
2504  unsigned int inputWidth2 = 3;
2505  unsigned int inputHeight2 = 6;
2506  unsigned int inputChannels2 = 1;
2507 
2508  // Defines the tensor descriptors.
2509  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2510  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2511  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2512 
2513  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2514  const float scale = 0.13497836f;
2515  const int32_t offset = -7;
2516 
2517  outputTensorInfo.SetQuantizationScale(scale);
2518  outputTensorInfo.SetQuantizationOffset(offset);
2519  inputTensorInfo1.SetQuantizationScale(scale);
2520  inputTensorInfo1.SetQuantizationOffset(offset);
2521  inputTensorInfo2.SetQuantizationScale(scale);
2522  inputTensorInfo2.SetQuantizationOffset(offset);
2523 
2524  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2525 
2526  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2527  {
2528  1, 2, 3,
2529  4, 5, 6,
2530  7, 8, 9,
2531  10, 11, 12,
2532  13, 14, 15,
2533  16, 17, 18,
2534 
2535  19, 20, 21,
2536  22, 23, 24,
2537  25, 26, 27,
2538  28, 29, 30,
2539  31, 32, 33,
2540  34, 35, 36,
2541 
2542  37, 38, 39,
2543  40, 41, 42,
2544  43, 44, 45,
2545  46, 47, 48,
2546  49, 50, 51,
2547  52, 53, 54,
2548  })
2549  );
2550 
2551  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2552  {
2553  1, 2, 3,
2554  4, 5, 6,
2555  7, 8, 9,
2556  10, 11, 12,
2557  13, 14, 15,
2558  16, 17, 18,
2559 
2560  19, 20, 21,
2561  22, 23, 24,
2562  25, 26, 27,
2563  28, 29, 30,
2564  31, 32, 33,
2565  34, 35, 36,
2566  })
2567  );
2568 
2569  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2570  {
2571  37, 38, 39,
2572  40, 41, 42,
2573  43, 44, 45,
2574  46, 47, 48,
2575  49, 50, 51,
2576  52, 53, 54,
2577  })
2578  );
2579 
2580  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2581  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2582 
2583  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2584  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2585 
2586 
2587  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2588 
2589  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2590 
2591  std::unique_ptr<ITensorHandle> inputHandle1 =
2592  subTensorsSupported ?
2593  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2594  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2595 
2596  std::unique_ptr<ITensorHandle> inputHandle2 =
2597  subTensorsSupported ?
2598  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2599  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2600 
2601 
2602  ConcatQueueDescriptor data;
2603  WorkloadInfo info;
2604  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2605  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2606  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2607 
2608  data.m_ViewOrigins.push_back(window1);
2609  data.m_ViewOrigins.push_back(window2);
2610 
2611  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2612 
2613  inputHandle1->Allocate();
2614  inputHandle2->Allocate();
2615  outputHandle->Allocate();
2616 
2617  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2618  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2619 
2620  workload->PostAllocationConfigure();
2621  workload->Execute();
2622 
2623  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2624 
2625  return ret;
2626 }
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< ViewOrigin > m_ViewOrigins
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual bool SupportsSubTensors() const =0
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:259