ArmNN
 20.08
ConcatTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::BFloat16, 3 > ConcatBFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2202 of file ConcatTestImpl.cpp.

2205 {
2206  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2207 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2775 of file ConcatTestImpl.cpp.

2778 {
2779  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2780 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2223 of file ConcatTestImpl.cpp.

2226 {
2227  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2228 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2796 of file ConcatTestImpl.cpp.

2799 {
2800  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2801  workloadFactory, memoryManager, 0.5f, -1);
2802 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2209 of file ConcatTestImpl.cpp.

2212 {
2213  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2214 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2782 of file ConcatTestImpl.cpp.

2785 {
2786  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2787 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2230 of file ConcatTestImpl.cpp.

2233 {
2234  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2235 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2804 of file ConcatTestImpl.cpp.

2807 {
2808  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2809  workloadFactory, memoryManager, 0.5f, -1);
2810 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2216 of file ConcatTestImpl.cpp.

2219 {
2220  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2221 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2789 of file ConcatTestImpl.cpp.

2792 {
2793  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2794 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2259 of file ConcatTestImpl.cpp.

2262 {
2263  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2264  workloadFactory, memoryManager, 0.0f, 0);
2265 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2835 of file ConcatTestImpl.cpp.

2838 {
2839  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2840 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2237 of file ConcatTestImpl.cpp.

2240 {
2241  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2242 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2812 of file ConcatTestImpl.cpp.

2815 {
2816  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2817 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2267 of file ConcatTestImpl.cpp.

2270 {
2271  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2272 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2842 of file ConcatTestImpl.cpp.

2845 {
2846  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2847  workloadFactory, memoryManager, 0.5f, -1);
2848 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2244 of file ConcatTestImpl.cpp.

2247 {
2248  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2249 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2819 of file ConcatTestImpl.cpp.

2822 {
2823  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2824 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2274 of file ConcatTestImpl.cpp.

2278 {
2279  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2280  workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2281 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2850 of file ConcatTestImpl.cpp.

2854 {
2855  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2856  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2857 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2251 of file ConcatTestImpl.cpp.

2255 {
2256  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2257 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2826 of file ConcatTestImpl.cpp.

2830 {
2831  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2832  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2833 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2312 of file ConcatTestImpl.cpp.

2315 {
2316  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2317 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2888 of file ConcatTestImpl.cpp.

2891 {
2892  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2893  workloadFactory, memoryManager, 0.5f, -1);
2894 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2319 of file ConcatTestImpl.cpp.

2322 {
2323  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2324  workloadFactory, memoryManager, 0.0f, 0);
2325 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2896 of file ConcatTestImpl.cpp.

2899 {
2900  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2901  workloadFactory, memoryManager, 0.5f, -1);
2902 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2327 of file ConcatTestImpl.cpp.

2330 {
2331  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2332 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2904 of file ConcatTestImpl.cpp.

2907 {
2908  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
2909  workloadFactory, memoryManager, 0.5f, -1);
2910 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2334 of file ConcatTestImpl.cpp.

2338 {
2339  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2340  workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2341 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2912 of file ConcatTestImpl.cpp.

2916 {
2917  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
2918  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2919 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2283 of file ConcatTestImpl.cpp.

2286 {
2287  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2288 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2859 of file ConcatTestImpl.cpp.

2862 {
2863  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2864 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2290 of file ConcatTestImpl.cpp.

2293 {
2294  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2295 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2866 of file ConcatTestImpl.cpp.

2869 {
2870  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2871 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2297 of file ConcatTestImpl.cpp.

2300 {
2301  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2302 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2873 of file ConcatTestImpl.cpp.

2876 {
2877  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2878 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2304 of file ConcatTestImpl.cpp.

2308 {
2309  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2310 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2880 of file ConcatTestImpl.cpp.

2883 {
2884  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2885  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2886 }

◆ ConcatBFloat16Test()

LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2350 of file ConcatTestImpl.cpp.

2353 {
2354  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
2355 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 1921 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and IWorkloadFactory::SupportsSubTensors().

1925 {
1926  IgnoreUnused(memoryManager);
1927 
1928  // Defines the tensor descriptors.
1929  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1930  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1931  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1932 
1933  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1934 
1935  // Quantized input1 tensor.
1936  const float inputScale1 = 0.5f;
1937  const int32_t inputOffset1 = 5;
1938 
1939  auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1940  {
1941  1, 2, 3,
1942  4, 5, 6,
1943  7, 8, 9,
1944  10, 11, 12,
1945  13, 14, 15,
1946  16, 17, 18,
1947 
1948  19, 20, 21,
1949  22, 23, 24,
1950  25, 26, 27,
1951  28, 29, 30,
1952  31, 32, 33,
1953  34, 35, 36
1954  }));
1955 
1956  // Quantized input2 tensor.
1957  const float inputScale2 = 0.2f;
1958  const int32_t inputOffset2 = 10;
1959 
1960  auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1961  {
1962  37, 38, 39,
1963  40, 41, 42,
1964  43, 44, 45,
1965  46, 47, 48,
1966  49, 50, 51,
1967  52, 53, 54
1968  }));
1969 
1970  // Quantized output tensor.
1971  const float outputScale = 0.1f;
1972  const int32_t outputOffset = 20;
1973 
1974  LayerTestResult<T, 3> ret(outputTensorInfo);
1975 
1976  ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1977  {
1978  0, 5, 74,
1979  10, 15, 76,
1980  20, 25, 78,
1981  30, 35, 80,
1982  40, 45, 82,
1983  50, 55, 84,
1984 
1985  60, 65, 86,
1986  70, 75, 88,
1987  80, 85, 90,
1988  90, 95, 92,
1989  100, 105, 94,
1990  110, 115, 96,
1991 
1992  120, 125, 98,
1993  130, 135, 100,
1994  140, 145, 102,
1995  150, 155, 104,
1996  160, 165, 106,
1997  170, 175, 108
1998  }));
1999 
2000  outputTensorInfo.SetQuantizationScale(outputScale);
2001  outputTensorInfo.SetQuantizationOffset(outputOffset);
2002  inputTensorInfo1.SetQuantizationScale(inputScale1);
2003  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2004  inputTensorInfo2.SetQuantizationScale(inputScale2);
2005  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2006 
2007  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2008  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2009 
2010  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2011  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2013  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2015  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2017  std::unique_ptr<ITensorHandle> inputHandle1 =
2018  subTensorsSupported ?
2019  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2020  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2021 
2022  std::unique_ptr<ITensorHandle> inputHandle2 =
2023  subTensorsSupported ?
2024  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2025  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2027  ConcatQueueDescriptor data;
2028  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2029  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2030  data.m_Parameters = desc;
2031 
2032  WorkloadInfo info;
2033  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2034  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2035  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2036 
2037  data.m_ViewOrigins.push_back(window1);
2038  data.m_ViewOrigins.push_back(window2);
2039 
2040  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2041 
2042  inputHandle1->Allocate();
2043  inputHandle2->Allocate();
2044  outputHandle->Allocate();
2045 
2046  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2047  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2048 
2049  workload->PostAllocationConfigure();
2050  workload->Execute();
2051 
2052  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2053 
2054  return ret;
2055 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
An OriginsDescriptor for the ConcatLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2343 of file ConcatTestImpl.cpp.

2346 {
2347  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
2348 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2077 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, and IWorkloadFactory::SupportsSubTensors().

2080 {
2081  IgnoreUnused(memoryManager);
2082 
2083  unsigned int outputWidth = 3;
2084  unsigned int outputHeight = 6;
2085  unsigned int outputChannels = 3;
2086 
2087  unsigned int inputWidth1 = 3;
2088  unsigned int inputHeight1 = 6;
2089  unsigned int inputChannels1 = 2;
2090 
2091  unsigned int inputWidth2 = 3;
2092  unsigned int inputHeight2 = 6;
2093  unsigned int inputChannels2 = 1;
2094 
2095  // Define the tensor descriptors.
2096  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2097  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2098  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2099 
2100  LayerTestResult<float,3> ret(outputTensorInfo);
2101 
2102  ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2103  {
2104  1.0f, 2.0f, 3.0f,
2105  4.0f, 5.0f, 6.0f,
2106  7.0f, 8.0f, 9.0f,
2107  10.0f, 11.0f, 12.0f,
2108  13.0f, 14.0f, 15.0f,
2109  16.0f, 17.0f, 18.0f,
2110 
2111  19.0f, 20.0f, 21.0f,
2112  22.0f, 23.0f, 24.0f,
2113  25.0f, 26.0f, 27.0f,
2114  28.0f, 29.0f, 30.0f,
2115  31.0f, 32.0f, 33.0f,
2116  34.0f, 35.0f, 36.0f,
2117 
2118  37.0f, 38.0f, 39.0f,
2119  40.0f, 41.0f, 42.0f,
2120  43.0f, 44.0f, 45.0f,
2121  46.0f, 47.0f, 48.0f,
2122  49.0f, 50.0f, 51.0f,
2123  52.0f, 53.0f, 54.0f,
2124  })
2125  );
2126 
2127  auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2128  {
2129  1.0f, 2.0f, 3.0f,
2130  4.0f, 5.0f, 6.0f,
2131  7.0f, 8.0f, 9.0f,
2132  10.0f, 11.0f, 12.0f,
2133  13.0f, 14.0f, 15.0f,
2134  16.0f, 17.0f, 18.0f,
2135 
2136  19.0f, 20.0f, 21.0f,
2137  22.0f, 23.0f, 24.0f,
2138  25.0f, 26.0f, 27.0f,
2139  28.0f, 29.0f, 30.0f,
2140  31.0f, 32.0f, 33.0f,
2141  34.0f, 35.0f, 36.0f,
2142  })
2143  );
2144 
2145  auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2146  {
2147  37.0f, 38.0f, 39.0f,
2148  40.0f, 41.0f, 42.0f,
2149  43.0f, 44.0f, 45.0f,
2150  46.0f, 47.0f, 48.0f,
2151  49.0f, 50.0f, 51.0f,
2152  52.0f, 53.0f, 54.0f,
2153  })
2154  );
2155 
2156  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2157  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2158 
2159  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2160  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2162  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2163 
2164  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2165 
2166  std::unique_ptr<ITensorHandle> inputHandle1 =
2167  subTensorsSupported ?
2168  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2169  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2170 
2171  std::unique_ptr<ITensorHandle> inputHandle2 =
2172  subTensorsSupported ?
2173  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2174  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2176  ConcatQueueDescriptor data;
2178  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2179  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2180  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2181 
2182  data.m_ViewOrigins.push_back(window1);
2183  data.m_ViewOrigins.push_back(window2);
2184 
2185  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2186 
2187  inputHandle1->Allocate();
2188  inputHandle2->Allocate();
2189  outputHandle->Allocate();
2190 
2191  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2192  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2193 
2194  workload->PostAllocationConfigure();
2195  workload->Execute();
2196 
2197  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2198 
2199  return ret;
2200 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2640 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2643 {
2644  IgnoreUnused(memoryManager);
2645 
2646  unsigned int outputWidth = 3;
2647  unsigned int outputHeight = 6;
2648  unsigned int outputChannels = 3;
2649 
2650  unsigned int inputWidth1 = 3;
2651  unsigned int inputHeight1 = 6;
2652  unsigned int inputChannels1 = 2;
2653 
2654  unsigned int inputWidth2 = 3;
2655  unsigned int inputHeight2 = 6;
2656  unsigned int inputChannels2 = 1;
2657 
2658  // Defines the tensor descriptors.
2659  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2660  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2661  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2662 
2663  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2664  const float scale = 0.13497836f;
2665  const int32_t offset = -7;
2666 
2667  outputTensorInfo.SetQuantizationScale(scale);
2668  outputTensorInfo.SetQuantizationOffset(offset);
2669  inputTensorInfo1.SetQuantizationScale(scale);
2670  inputTensorInfo1.SetQuantizationOffset(offset);
2671  inputTensorInfo2.SetQuantizationScale(scale);
2672  inputTensorInfo2.SetQuantizationOffset(offset);
2673 
2674  LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2675 
2676  ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2677  {
2678  1, 2, 3,
2679  4, 5, 6,
2680  7, 8, 9,
2681  10, 11, 12,
2682  13, 14, 15,
2683  16, 17, 18,
2684 
2685  19, 20, 21,
2686  22, 23, 24,
2687  25, 26, 27,
2688  28, 29, 30,
2689  31, 32, 33,
2690  34, 35, 36,
2691 
2692  37, 38, 39,
2693  40, 41, 42,
2694  43, 44, 45,
2695  46, 47, 48,
2696  49, 50, 51,
2697  52, 53, 54,
2698  }));
2699 
2700  auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2701  {
2702  1, 2, 3,
2703  4, 5, 6,
2704  7, 8, 9,
2705  10, 11, 12,
2706  13, 14, 15,
2707  16, 17, 18,
2708 
2709  19, 20, 21,
2710  22, 23, 24,
2711  25, 26, 27,
2712  28, 29, 30,
2713  31, 32, 33,
2714  34, 35, 36,
2715  }));
2716 
2717  auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2718  {
2719  37, 38, 39,
2720  40, 41, 42,
2721  43, 44, 45,
2722  46, 47, 48,
2723  49, 50, 51,
2724  52, 53, 54,
2725  }));
2726 
2727  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2728  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2729 
2730  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2731  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2732 
2734  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2735 
2736  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2737 
2738  std::unique_ptr<ITensorHandle> inputHandle1 =
2739  subTensorsSupported ?
2740  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2741  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2742 
2743  std::unique_ptr<ITensorHandle> inputHandle2 =
2744  subTensorsSupported ?
2745  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2746  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2748 
2749  ConcatQueueDescriptor data;
2751  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2752  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2753  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2754 
2755  data.m_ViewOrigins.push_back(window1);
2756  data.m_ViewOrigins.push_back(window2);
2757 
2758  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2759 
2760  inputHandle1->Allocate();
2761  inputHandle2->Allocate();
2762  outputHandle->Allocate();
2763 
2764  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2765  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2766 
2767  workload->PostAllocationConfigure();
2768  workload->Execute();
2769 
2770  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2771 
2772  return ret;
2773 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2357 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, and IWorkloadFactory::SupportsSubTensors().

2360 {
2361  IgnoreUnused(memoryManager);
2362 
2363  unsigned int outputWidth = 3;
2364  unsigned int outputHeight = 6;
2365  unsigned int outputChannels = 3;
2366 
2367  unsigned int inputWidth1 = 3;
2368  unsigned int inputHeight1 = 6;
2369  unsigned int inputChannels1 = 2;
2370 
2371  unsigned int inputWidth2 = 3;
2372  unsigned int inputHeight2 = 6;
2373  unsigned int inputChannels2 = 1;
2374 
2375  // Defines the tensor descriptors.
2376  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2377  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2378  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2379 
2380  // Quantized input1 tensor. Range [-3, 1]
2381  const float inputScale1 = 0.015686f;
2382  const int32_t inputOffset1 = 192;
2383 
2384  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2385  {
2386  1, 2, 3,
2387  4, 5, 6,
2388  7, 8, 9,
2389  10, 11, 12,
2390  13, 14, 15,
2391  16, 17, 18,
2392 
2393  19, 20, 21,
2394  22, 23, 24,
2395  25, 26, 27,
2396  28, 29, 30,
2397  31, 32, 33,
2398  34, 35, 36,
2399  })
2400  );
2401 
2402  // Quantized input2 tensor. Range [-1, 4]
2403  const float inputScale2 = 0.019608f;
2404  const int32_t inputOffset2 = 50;
2405 
2406  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2407  {
2408  37, 38, 39,
2409  40, 41, 42,
2410  43, 44, 45,
2411  46, 47, 48,
2412  49, 50, 51,
2413  52, 53, 54,
2414  })
2415  );
2416 
2417  // Output has the same quantization parameters as input1,
2418  // so that only the requantization of input2 is required
2419  const float outputScale = 0.015686f;
2420  const int32_t outputOffset = 192;
2421 
2422  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2423 
2424  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2425  {
2426  1, 2, 3,
2427  4, 5, 6,
2428  7, 8, 9,
2429  10, 11, 12,
2430  13, 14, 15,
2431  16, 17, 18,
2432 
2433  19, 20, 21,
2434  22, 23, 24,
2435  25, 26, 27,
2436  28, 29, 30,
2437  31, 32, 33,
2438  34, 35, 36,
2439 
2440  176, 177, 178,
2441  179, 181, 182,
2442  183, 184, 186,
2443  187, 188, 189,
2444  191, 192, 193,
2445  195, 196, 197,
2446  })
2447  );
2448 
2449  outputTensorInfo.SetQuantizationScale(outputScale);
2450  outputTensorInfo.SetQuantizationOffset(outputOffset);
2451  inputTensorInfo1.SetQuantizationScale(inputScale1);
2452  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2453  inputTensorInfo2.SetQuantizationScale(inputScale2);
2454  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2455 
2456  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2457  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2458 
2459  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2460  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2462  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2463 
2464  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2465 
2466  std::unique_ptr<ITensorHandle> inputHandle1 =
2467  subTensorsSupported ?
2468  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2469  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2470 
2471  std::unique_ptr<ITensorHandle> inputHandle2 =
2472  subTensorsSupported ?
2473  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2474  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2476  ConcatQueueDescriptor data;
2478  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2479  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2480  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2481 
2482  data.m_ViewOrigins.push_back(window1);
2483  data.m_ViewOrigins.push_back(window2);
2484 
2485  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2486 
2487  inputHandle1->Allocate();
2488  inputHandle2->Allocate();
2489  outputHandle->Allocate();
2490 
2491  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2492  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2493 
2494  workload->PostAllocationConfigure();
2495  workload->Execute();
2496 
2497  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2498 
2499  return ret;
2500 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2502 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2505 {
2506  IgnoreUnused(memoryManager);
2507 
2508  unsigned int outputWidth = 3;
2509  unsigned int outputHeight = 6;
2510  unsigned int outputChannels = 3;
2511 
2512  unsigned int inputWidth1 = 3;
2513  unsigned int inputHeight1 = 6;
2514  unsigned int inputChannels1 = 2;
2515 
2516  unsigned int inputWidth2 = 3;
2517  unsigned int inputHeight2 = 6;
2518  unsigned int inputChannels2 = 1;
2519 
2520  // Defines the tensor descriptors.
2521  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2522  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2523  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2524 
2525  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2526  const float scale = 0.13497836f;
2527  const int32_t offset = -7;
2528 
2529  outputTensorInfo.SetQuantizationScale(scale);
2530  outputTensorInfo.SetQuantizationOffset(offset);
2531  inputTensorInfo1.SetQuantizationScale(scale);
2532  inputTensorInfo1.SetQuantizationOffset(offset);
2533  inputTensorInfo2.SetQuantizationScale(scale);
2534  inputTensorInfo2.SetQuantizationOffset(offset);
2535 
2536  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2537 
2538  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2539  {
2540  1, 2, 3,
2541  4, 5, 6,
2542  7, 8, 9,
2543  10, 11, 12,
2544  13, 14, 15,
2545  16, 17, 18,
2546 
2547  19, 20, 21,
2548  22, 23, 24,
2549  25, 26, 27,
2550  28, 29, 30,
2551  31, 32, 33,
2552  34, 35, 36,
2553 
2554  37, 38, 39,
2555  40, 41, 42,
2556  43, 44, 45,
2557  46, 47, 48,
2558  49, 50, 51,
2559  52, 53, 54,
2560  })
2561  );
2562 
2563  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2564  {
2565  1, 2, 3,
2566  4, 5, 6,
2567  7, 8, 9,
2568  10, 11, 12,
2569  13, 14, 15,
2570  16, 17, 18,
2571 
2572  19, 20, 21,
2573  22, 23, 24,
2574  25, 26, 27,
2575  28, 29, 30,
2576  31, 32, 33,
2577  34, 35, 36,
2578  })
2579  );
2580 
2581  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2582  {
2583  37, 38, 39,
2584  40, 41, 42,
2585  43, 44, 45,
2586  46, 47, 48,
2587  49, 50, 51,
2588  52, 53, 54,
2589  })
2590  );
2591 
2592  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2593  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2594 
2595  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2596  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2597 
2599  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2600 
2601  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2602 
2603  std::unique_ptr<ITensorHandle> inputHandle1 =
2604  subTensorsSupported ?
2605  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2606  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2607 
2608  std::unique_ptr<ITensorHandle> inputHandle2 =
2609  subTensorsSupported ?
2610  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2611  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2613 
2614  ConcatQueueDescriptor data;
2616  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2617  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2618  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2619 
2620  data.m_ViewOrigins.push_back(window1);
2621  data.m_ViewOrigins.push_back(window2);
2622 
2623  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2624 
2625  inputHandle1->Allocate();
2626  inputHandle2->Allocate();
2627  outputHandle->Allocate();
2628 
2629  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2630  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2631 
2632  workload->PostAllocationConfigure();
2633  workload->Execute();
2634 
2635  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2636 
2637  return ret;
2638 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0