ArmNN
 21.08
ConcatTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::BFloat16, 3 > ConcatBFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2240 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2244 {
2245  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2246 }
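
Concat1dTestImpl (defined in ConcatTestImpl.cpp, not shown on this page) concatenates two rank-1 tensors along dimension 0. As a rough standalone illustration of that operation, with made-up values rather than the data the test actually uses, concatenating along dimension 0 simply appends the second input after the first:

#include <cstdio>
#include <vector>

// Hypothetical standalone sketch of 1-D concatenation along dimension 0.
// The values are invented; the real inputs live in Concat1dTestImpl.
int main()
{
    std::vector<float> input0 = {1.0f, 2.0f, 3.0f};
    std::vector<float> input1 = {4.0f, 5.0f};

    std::vector<float> output = input0;                        // copy input0 first
    output.insert(output.end(), input1.begin(), input1.end()); // then append input1

    for (float v : output) { std::printf("%g ", v); }          // prints: 1 2 3 4 5
    std::printf("\n");
    return 0;
}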

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2849 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2853 {
2854  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2855 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2264 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2268 {
2269  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2270  tensorHandleFactory, 0.0f, 0);
2271 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2873 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2877 {
2878  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2879  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2880 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2248 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2252 {
2253  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2254 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2857 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2861 {
2862  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2863 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2273 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2277 {
2278  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory,
2279  memoryManager,
2280  tensorHandleFactory,
2281  0.0f,
2282  0);
2283 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2882 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2886 {
2887  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2888  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2889 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2256 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2260 {
2261  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2262 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2865 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2869 {
2870  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2871 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2311 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2315 {
2316  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2317  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2318 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2917 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2921 {
2922  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2923 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2285 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2289 {
2290  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2291 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2891 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2895 {
2896  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2897 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2320 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2324 {
2325  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2326  tensorHandleFactory, 0.0f, 0);
2327 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2925 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2929 {
2930  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2931  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2932 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2293 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2297 {
2298  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2299 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2899 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2903 {
2904  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2905 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2329 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2334 {
2335  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2336  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.0f, 0);
2337 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2934 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2939 {
2940  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2941  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2942 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2301 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2306 {
2307  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
2308  useSubtensor, 0.0f, 0);
2309 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2907 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2912 {
2913  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2914  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2915 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2373 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2377 {
2378  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2379  tensorHandleFactory, 0.0f, 0);
2380 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2977 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2981 {
2982  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2983  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2984 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2382 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2386 {
2387  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2388  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2389 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2986 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2990 {
2991  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2992  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2993 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2391 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2395 {
2396  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2397  tensorHandleFactory, 0.0f, 0);
2398 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2995 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2999 {
3000  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
3001  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
3002 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2400 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2405 {
2406  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2407  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, useSubtensor);
2408 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 3004 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

3009 {
3010  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
3011  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
3012 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2339 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2343 {
2344  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2345 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2944 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2948 {
2949  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2950 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2347 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2351 {
2352  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2353 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2952 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2956 {
2957  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2958 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2355 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2359 {
2360  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2361 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2960 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2964 {
2965  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2966 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2363 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2368 {
2369  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2370  tensorHandleFactory, 0.0f, 0, useSubtensor);
2371 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2968 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2972 {
2973  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2974  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
2975 }

◆ ConcatBFloat16Test()

LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2418 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2422 {
2423  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2424 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 1952 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

1957 {
1958  IgnoreUnused(memoryManager);
1959 
1960  // Defines the tensor descriptors.
1961  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1962  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1963  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1964 
1965  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1966 
1967  // Quantized input1 tensor.
1968  const float inputScale1 = 0.5f;
1969  const int32_t inputOffset1 = 5;
1970 
1971  std::vector<T> input1 =
1972  {
1973  1, 2, 3,
1974  4, 5, 6,
1975  7, 8, 9,
1976  10, 11, 12,
1977  13, 14, 15,
1978  16, 17, 18,
1979 
1980  19, 20, 21,
1981  22, 23, 24,
1982  25, 26, 27,
1983  28, 29, 30,
1984  31, 32, 33,
1985  34, 35, 36
1986  };
1987 
1988  // Quantized input2 tensor.
1989  const float inputScale2 = 0.2f;
1990  const int32_t inputOffset2 = 10;
1991 
1992  std::vector<T> input2 =
1993  {
1994  37, 38, 39,
1995  40, 41, 42,
1996  43, 44, 45,
1997  46, 47, 48,
1998  49, 50, 51,
1999  52, 53, 54
2000  };
2001 
2002  // Quantized output tensor.
2003  const float outputScale = 0.1f;
2004  const int32_t outputOffset = 20;
2005 
2006  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2007 
2008  std::vector<T> expectedOutput =
2009  {
2010  0, 5, 74,
2011  10, 15, 76,
2012  20, 25, 78,
2013  30, 35, 80,
2014  40, 45, 82,
2015  50, 55, 84,
2016 
2017  60, 65, 86,
2018  70, 75, 88,
2019  80, 85, 90,
2020  90, 95, 92,
2021  100, 105, 94,
2022  110, 115, 96,
2023 
2024  120, 125, 98,
2025  130, 135, 100,
2026  140, 145, 102,
2027  150, 155, 104,
2028  160, 165, 106,
2029  170, 175, 108
2030  };
2031 
2032  outputTensorInfo.SetQuantizationScale(outputScale);
2033  outputTensorInfo.SetQuantizationOffset(outputOffset);
2034  inputTensorInfo1.SetQuantizationScale(inputScale1);
2035  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2036  inputTensorInfo2.SetQuantizationScale(inputScale2);
2037  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2038 
2039  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2040  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2041 
2042  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2043  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2044 
2045  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2046 
2047  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2048 
2049  std::unique_ptr<ITensorHandle> inputHandle1 =
2050  subTensorsSupported ?
2051  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2052  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2053 
2054  std::unique_ptr<ITensorHandle> inputHandle2 =
2055  subTensorsSupported ?
2056  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2057  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2058 
2059  ConcatQueueDescriptor data;
2060  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2061  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2062  data.m_Parameters = desc;
2063 
2064  WorkloadInfo info;
2065  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2066  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2067  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2068 
2069  data.m_ViewOrigins.push_back(window1);
2070  data.m_ViewOrigins.push_back(window2);
2071 
2072  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2073 
2074  inputHandle1->Allocate();
2075  inputHandle2->Allocate();
2076  outputHandle->Allocate();
2077 
2078  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2079  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2080 
2081  workload->PostAllocationConfigure();
2082  workload->Execute();
2083 
2084  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2085 
2086  return LayerTestResult<T, 3>(actualOutput,
2087  expectedOutput,
2088  outputHandle->GetShape(),
2089  outputTensorInfo.GetShape());
2090 }
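
The expectedOutput above is derived by requantizing each input from its own quantization parameters to the output's: the first two columns of every row come from input1 (scale 0.5, offset 5) and the third column from input2 (scale 0.2, offset 10), both re-expressed at the output's scale 0.1 and offset 20. A minimal standalone sketch of that arithmetic (plain C++, not ArmNN code):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Requantize a quantized value from (inScale, inOffset) to (outScale, outOffset).
// This reproduces how the expectedOutput values in the test above are obtained.
int32_t Requantize(int32_t q, float inScale, int32_t inOffset, float outScale, int32_t outOffset)
{
    const float real = inScale * static_cast<float>(q - inOffset);
    return static_cast<int32_t>(std::round(real / outScale)) + outOffset;
}

int main()
{
    // First element of input1: 0.5 * (1 - 5) = -2.0, and -2.0 / 0.1 + 20 = 0.
    std::printf("%d\n", Requantize(1, 0.5f, 5, 0.1f, 20));   // prints 0
    // First element of input2: 0.2 * (37 - 10) = 5.4, and 5.4 / 0.1 + 20 = 74.
    std::printf("%d\n", Requantize(37, 0.2f, 10, 0.1f, 20)); // prints 74
    return 0;
}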

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2410 of file ConcatTestImpl.cpp.

Referenced by TEST_SUITE().

2414 {
2415  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2416 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2114 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2118 {
2119  IgnoreUnused(memoryManager);
2120 
2121  unsigned int outputWidth = 3;
2122  unsigned int outputHeight = 6;
2123  unsigned int outputChannels = 3;
2124 
2125  unsigned int inputWidth1 = 3;
2126  unsigned int inputHeight1 = 6;
2127  unsigned int inputChannels1 = 2;
2128 
2129  unsigned int inputWidth2 = 3;
2130  unsigned int inputHeight2 = 6;
2131  unsigned int inputChannels2 = 1;
2132 
2133  // Define the tensor descriptors.
2134  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2135  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2136  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2137 
2138  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
2139 
2140  std::vector<float> expectedOutput =
2141  {
2142  1.0f, 2.0f, 3.0f,
2143  4.0f, 5.0f, 6.0f,
2144  7.0f, 8.0f, 9.0f,
2145  10.0f, 11.0f, 12.0f,
2146  13.0f, 14.0f, 15.0f,
2147  16.0f, 17.0f, 18.0f,
2148 
2149  19.0f, 20.0f, 21.0f,
2150  22.0f, 23.0f, 24.0f,
2151  25.0f, 26.0f, 27.0f,
2152  28.0f, 29.0f, 30.0f,
2153  31.0f, 32.0f, 33.0f,
2154  34.0f, 35.0f, 36.0f,
2155 
2156  37.0f, 38.0f, 39.0f,
2157  40.0f, 41.0f, 42.0f,
2158  43.0f, 44.0f, 45.0f,
2159  46.0f, 47.0f, 48.0f,
2160  49.0f, 50.0f, 51.0f,
2161  52.0f, 53.0f, 54.0f
2162  };
2163 
2164  std::vector<float> input1 =
2165  {
2166  1.0f, 2.0f, 3.0f,
2167  4.0f, 5.0f, 6.0f,
2168  7.0f, 8.0f, 9.0f,
2169  10.0f, 11.0f, 12.0f,
2170  13.0f, 14.0f, 15.0f,
2171  16.0f, 17.0f, 18.0f,
2172 
2173  19.0f, 20.0f, 21.0f,
2174  22.0f, 23.0f, 24.0f,
2175  25.0f, 26.0f, 27.0f,
2176  28.0f, 29.0f, 30.0f,
2177  31.0f, 32.0f, 33.0f,
2178  34.0f, 35.0f, 36.0f
2179  };
2180 
2181  std::vector<float> input2 =
2182  {
2183  37.0f, 38.0f, 39.0f,
2184  40.0f, 41.0f, 42.0f,
2185  43.0f, 44.0f, 45.0f,
2186  46.0f, 47.0f, 48.0f,
2187  49.0f, 50.0f, 51.0f,
2188  52.0f, 53.0f, 54.0f,
2189  };
2190 
2191  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2192  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2193 
2194  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2195  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2196 
2197  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2198 
2199  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2200 
2201  std::unique_ptr<ITensorHandle> inputHandle1 =
2202  subTensorsSupported ?
2203  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2204  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2205 
2206  std::unique_ptr<ITensorHandle> inputHandle2 =
2207  subTensorsSupported ?
2208  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2209  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2210 
2211  ConcatQueueDescriptor data;
2212  WorkloadInfo info;
2213  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2214  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2215  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2216 
2217  data.m_ViewOrigins.push_back(window1);
2218  data.m_ViewOrigins.push_back(window2);
2219 
2220  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2221 
2222  inputHandle1->Allocate();
2223  inputHandle2->Allocate();
2224  outputHandle->Allocate();
2225 
2226  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2227  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2228 
2229  workload->PostAllocationConfigure();
2230  workload->Execute();
2231 
2232  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2233 
2234  return LayerTestResult<float, 3>(actualOutput,
2235  expectedOutput,
2236  outputHandle->GetShape(),
2237  outputTensorInfo.GetShape());
2238 }
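
The two ViewOrigins describe where each input is placed inside the [3, 6, 3] output: input1 (2 channels) starts at origin {0, 0, 0} and input2 (1 channel) at origin {2, 0, 0}, i.e. at output channel 2. When the backend supports sub-tensors, the input handles are created as sub-tensors of the output so no extra copy is needed; otherwise separate input tensors are used. A standalone sketch of the copy-based placement (an illustration only, not ArmNN's implementation):

#include <array>
#include <cstdio>
#include <vector>

int main()
{
    const std::array<unsigned, 3> outShape = {3, 6, 3}; // channels, height, width
    const std::array<unsigned, 3> in1Shape = {2, 6, 3};
    const std::array<unsigned, 3> in2Shape = {1, 6, 3};
    const std::array<unsigned, 3> origin1  = {0, 0, 0};
    const std::array<unsigned, 3> origin2  = {2, 0, 0}; // input2 starts at output channel 2

    std::vector<float> in1(2 * 6 * 3), in2(1 * 6 * 3), out(3 * 6 * 3, 0.0f);
    for (unsigned i = 0; i < in1.size(); ++i) { in1[i] = 1.0f + i; }  // 1 .. 36, as in the test
    for (unsigned i = 0; i < in2.size(); ++i) { in2[i] = 37.0f + i; } // 37 .. 54, as in the test

    // Copy a whole input view into the output at its view origin.
    auto copyView = [&](const std::vector<float>& src, const std::array<unsigned, 3>& shape,
                        const std::array<unsigned, 3>& origin)
    {
        for (unsigned c = 0; c < shape[0]; ++c)
            for (unsigned h = 0; h < shape[1]; ++h)
                for (unsigned w = 0; w < shape[2]; ++w)
                {
                    const unsigned dst = ((c + origin[0]) * outShape[1] + (h + origin[1])) * outShape[2]
                                       + (w + origin[2]);
                    out[dst] = src[(c * shape[1] + h) * shape[2] + w];
                }
    };
    copyView(in1, in1Shape, origin1);
    copyView(in2, in2Shape, origin2);

    std::printf("%g %g\n", out[0], out[36]); // prints "1 37": output channel 2 begins at flat index 36
    return 0;
}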

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2710 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2714 {
2715  IgnoreUnused(memoryManager);
2716 
2717  unsigned int outputWidth = 3;
2718  unsigned int outputHeight = 6;
2719  unsigned int outputChannels = 3;
2720 
2721  unsigned int inputWidth1 = 3;
2722  unsigned int inputHeight1 = 6;
2723  unsigned int inputChannels1 = 2;
2724 
2725  unsigned int inputWidth2 = 3;
2726  unsigned int inputHeight2 = 6;
2727  unsigned int inputChannels2 = 1;
2728 
2729  // Defines the tensor descriptors.
2730  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2731  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2732  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2733 
2734  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2735  const float scale = 0.13497836f;
2736  const int32_t offset = -7;
2737 
2738  outputTensorInfo.SetQuantizationScale(scale);
2739  outputTensorInfo.SetQuantizationOffset(offset);
2740  inputTensorInfo1.SetQuantizationScale(scale);
2741  inputTensorInfo1.SetQuantizationOffset(offset);
2742  inputTensorInfo2.SetQuantizationScale(scale);
2743  inputTensorInfo2.SetQuantizationOffset(offset);
2744 
2745  std::vector<uint16_t> actualOutput(outputTensorInfo.GetNumElements());
2746 
2747  std::vector<uint16_t> expectedOutput =
2748  {
2749  1, 2, 3,
2750  4, 5, 6,
2751  7, 8, 9,
2752  10, 11, 12,
2753  13, 14, 15,
2754  16, 17, 18,
2755 
2756  19, 20, 21,
2757  22, 23, 24,
2758  25, 26, 27,
2759  28, 29, 30,
2760  31, 32, 33,
2761  34, 35, 36,
2762 
2763  37, 38, 39,
2764  40, 41, 42,
2765  43, 44, 45,
2766  46, 47, 48,
2767  49, 50, 51,
2768  52, 53, 54
2769  };
2770 
2771  std::vector<uint16_t> input1 =
2772  {
2773  1, 2, 3,
2774  4, 5, 6,
2775  7, 8, 9,
2776  10, 11, 12,
2777  13, 14, 15,
2778  16, 17, 18,
2779 
2780  19, 20, 21,
2781  22, 23, 24,
2782  25, 26, 27,
2783  28, 29, 30,
2784  31, 32, 33,
2785  34, 35, 36,
2786  };
2787 
2788  std::vector<uint16_t> input2 =
2789  {
2790  37, 38, 39,
2791  40, 41, 42,
2792  43, 44, 45,
2793  46, 47, 48,
2794  49, 50, 51,
2795  52, 53, 54,
2796  };
2797 
2798  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2799  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2800 
2801  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2802  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2803 
2804 
2805  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2806 
2807  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2808 
2809  std::unique_ptr<ITensorHandle> inputHandle1 =
2810  subTensorsSupported ?
2811  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2812  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2813 
2814  std::unique_ptr<ITensorHandle> inputHandle2 =
2815  subTensorsSupported ?
2816  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2817  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2818 
2819 
2820  ConcatQueueDescriptor data;
2821  WorkloadInfo info;
2822  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2823  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2824  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2825 
2826  data.m_ViewOrigins.push_back(window1);
2827  data.m_ViewOrigins.push_back(window2);
2828 
2829  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2830 
2831  inputHandle1->Allocate();
2832  inputHandle2->Allocate();
2833  outputHandle->Allocate();
2834 
2835  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2836  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2837 
2838  workload->PostAllocationConfigure();
2839  workload->Execute();
2840 
2841  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2842 
2843  return LayerTestResult<uint16_t, 3>(actualOutput,
2844  expectedOutput,
2845  outputHandle->GetShape(),
2846  outputTensorInfo.GetShape());
2847 }
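
Because the inputs and the output use identical quantization parameters here, no requantization takes place: the expected output is simply the two quantized input blocks stacked along dimension 0. Contrast this with ConcatDifferentInputOutputQParamTest above and ConcatUint8DifferentQParamsTest below, where the parameters differ and the expected values are requantized.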

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2426 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2430 {
2431  IgnoreUnused(memoryManager);
2432 
2433  unsigned int outputWidth = 3;
2434  unsigned int outputHeight = 6;
2435  unsigned int outputChannels = 3;
2436 
2437  unsigned int inputWidth1 = 3;
2438  unsigned int inputHeight1 = 6;
2439  unsigned int inputChannels1 = 2;
2440 
2441  unsigned int inputWidth2 = 3;
2442  unsigned int inputHeight2 = 6;
2443  unsigned int inputChannels2 = 1;
2444 
2445  // Defines the tensor descriptors.
2446  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2447  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2448  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2449 
2450  // Quantized input1 tensor. Range [-3, 1]
2451  const float inputScale1 = 0.015686f;
2452  const int32_t inputOffset1 = 192;
2453 
2454  std::vector<uint8_t> input1 =
2455  {
2456  1, 2, 3,
2457  4, 5, 6,
2458  7, 8, 9,
2459  10, 11, 12,
2460  13, 14, 15,
2461  16, 17, 18,
2462 
2463  19, 20, 21,
2464  22, 23, 24,
2465  25, 26, 27,
2466  28, 29, 30,
2467  31, 32, 33,
2468  34, 35, 36
2469  };
2470 
2471  // Quantized input2 tensor. Range [-1, 4]
2472  const float inputScale2 = 0.019608f;
2473  const int32_t inputOffset2 = 50;
2474 
2475  std::vector<uint8_t> input2 =
2476  {
2477  37, 38, 39,
2478  40, 41, 42,
2479  43, 44, 45,
2480  46, 47, 48,
2481  49, 50, 51,
2482  52, 53, 54
2483  };
2484 
2485  // Output has the same quantization parameters as input1,
2486  // so only the requantization of input2 is required.
2487  const float outputScale = 0.015686f;
2488  const int32_t outputOffset = 192;
2489 
2490  std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
2491 
2492  std::vector<uint8_t> expectedOutput =
2493  {
2494  1, 2, 3,
2495  4, 5, 6,
2496  7, 8, 9,
2497  10, 11, 12,
2498  13, 14, 15,
2499  16, 17, 18,
2500 
2501  19, 20, 21,
2502  22, 23, 24,
2503  25, 26, 27,
2504  28, 29, 30,
2505  31, 32, 33,
2506  34, 35, 36,
2507 
2508  176, 177, 178,
2509  179, 181, 182,
2510  183, 184, 186,
2511  187, 188, 189,
2512  191, 192, 193,
2513  195, 196, 197
2514  };
2515 
2516  outputTensorInfo.SetQuantizationScale(outputScale);
2517  outputTensorInfo.SetQuantizationOffset(outputOffset);
2518  inputTensorInfo1.SetQuantizationScale(inputScale1);
2519  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2520  inputTensorInfo2.SetQuantizationScale(inputScale2);
2521  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2522 
2523  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2524  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2525 
2526  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2527  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2528 
2529  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2530 
2531  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2532 
2533  std::unique_ptr<ITensorHandle> inputHandle1 =
2534  subTensorsSupported ?
2535  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2536  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2537 
2538  std::unique_ptr<ITensorHandle> inputHandle2 =
2539  subTensorsSupported ?
2540  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2541  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2542 
2543  ConcatQueueDescriptor data;
2544  WorkloadInfo info;
2545  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2546  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2547  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2548 
2549  data.m_ViewOrigins.push_back(window1);
2550  data.m_ViewOrigins.push_back(window2);
2551 
2552  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2553 
2554  inputHandle1->Allocate();
2555  inputHandle2->Allocate();
2556  outputHandle->Allocate();
2557 
2558  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2559  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2560 
2561  workload->PostAllocationConfigure();
2562  workload->Execute();
2563 
2564  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2565 
2566  return LayerTestResult<uint8_t, 3>(actualOutput,
2567  expectedOutput,
2568  outputHandle->GetShape(),
2569  outputTensorInfo.GetShape());
2570 }
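
Only input2 is requantized in the expected output: for example, its first element (quantized value 37, scale 0.019608, offset 50) dequantizes to 0.019608 * (37 - 50), about -0.255, which at the output scale 0.015686 and offset 192 becomes round(-0.255 / 0.015686) + 192 = 176, the first value of the third channel block in expectedOutput above. Input1 is carried over unchanged because its quantization parameters already match the output's.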

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 2572 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

Referenced by TEST_SUITE().

2576 {
2577  IgnoreUnused(memoryManager);
2578 
2579  unsigned int outputWidth = 3;
2580  unsigned int outputHeight = 6;
2581  unsigned int outputChannels = 3;
2582 
2583  unsigned int inputWidth1 = 3;
2584  unsigned int inputHeight1 = 6;
2585  unsigned int inputChannels1 = 2;
2586 
2587  unsigned int inputWidth2 = 3;
2588  unsigned int inputHeight2 = 6;
2589  unsigned int inputChannels2 = 1;
2590 
2591  // Defines the tensor descriptors.
2592  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2593  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2594  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2595 
2596  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2597  const float scale = 0.13497836f;
2598  const int32_t offset = -7;
2599 
2600  outputTensorInfo.SetQuantizationScale(scale);
2601  outputTensorInfo.SetQuantizationOffset(offset);
2602  inputTensorInfo1.SetQuantizationScale(scale);
2603  inputTensorInfo1.SetQuantizationOffset(offset);
2604  inputTensorInfo2.SetQuantizationScale(scale);
2605  inputTensorInfo2.SetQuantizationOffset(offset);
2606 
2607  std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
2608 
2609  std::vector<uint8_t> expectedOutput =
2610  {
2611  1, 2, 3,
2612  4, 5, 6,
2613  7, 8, 9,
2614  10, 11, 12,
2615  13, 14, 15,
2616  16, 17, 18,
2617 
2618  19, 20, 21,
2619  22, 23, 24,
2620  25, 26, 27,
2621  28, 29, 30,
2622  31, 32, 33,
2623  34, 35, 36,
2624 
2625  37, 38, 39,
2626  40, 41, 42,
2627  43, 44, 45,
2628  46, 47, 48,
2629  49, 50, 51,
2630  52, 53, 54
2631  };
2632 
2633  std::vector<uint8_t> input1 =
2634  {
2635  1, 2, 3,
2636  4, 5, 6,
2637  7, 8, 9,
2638  10, 11, 12,
2639  13, 14, 15,
2640  16, 17, 18,
2641 
2642  19, 20, 21,
2643  22, 23, 24,
2644  25, 26, 27,
2645  28, 29, 30,
2646  31, 32, 33,
2647  34, 35, 36
2648  };
2649 
2650  std::vector<uint8_t> input2 =
2651  {
2652  37, 38, 39,
2653  40, 41, 42,
2654  43, 44, 45,
2655  46, 47, 48,
2656  49, 50, 51,
2657  52, 53, 54
2658  };
2659 
2660  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2661  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2662 
2663  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2664  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2665 
2666  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2667 
2668  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2669 
2670  std::unique_ptr<ITensorHandle> inputHandle1 =
2671  subTensorsSupported ?
2672  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2673  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2674 
2675  std::unique_ptr<ITensorHandle> inputHandle2 =
2676  subTensorsSupported ?
2677  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2678  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2679 
2680 
2681  ConcatQueueDescriptor data;
2682  WorkloadInfo info;
2683  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2684  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2685  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2686 
2687  data.m_ViewOrigins.push_back(window1);
2688  data.m_ViewOrigins.push_back(window2);
2689 
2690  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2691 
2692  inputHandle1->Allocate();
2693  inputHandle2->Allocate();
2694  outputHandle->Allocate();
2695 
2696  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
2697  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
2698 
2699  workload->PostAllocationConfigure();
2700  workload->Execute();
2701 
2702  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2703 
2704  return LayerTestResult<uint8_t, 3>(actualOutput,
2705  expectedOutput,
2706  outputHandle->GetShape(),
2707  outputTensorInfo.GetShape());
2708 }