ArmNN
 20.11
ConcatTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::BFloat16, 3 > ConcatBFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 3 > ConcatFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 1 > Concat1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > ConcatDifferentInputOutputQParamUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 
LayerTestResult< int16_t, 3 > ConcatDifferentInputOutputQParamInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2237 of file ConcatTestImpl.cpp.

2241 {
2242  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2243 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2843 of file ConcatTestImpl.cpp.

2847 {
2848  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2849 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2261 of file ConcatTestImpl.cpp.

2265 {
2266  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2267  tensorHandleFactory, 0.0f, 0);
2268 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2867 of file ConcatTestImpl.cpp.

2871 {
2872  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2873  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2874 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2245 of file ConcatTestImpl.cpp.

2249 {
2250  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2251 }

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2851 of file ConcatTestImpl.cpp.

2855 {
2856  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2857 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2270 of file ConcatTestImpl.cpp.

2274 {
2275  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory,
2276  memoryManager,
2277  tensorHandleFactory,
2278  0.0f,
2279  0);
2280 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2876 of file ConcatTestImpl.cpp.

2880 {
2881  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2882  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2883 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2253 of file ConcatTestImpl.cpp.

2257 {
2258  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2259 }

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2859 of file ConcatTestImpl.cpp.

2863 {
2864  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2865 }

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2308 of file ConcatTestImpl.cpp.

2312 {
2313  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2314  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2315 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2911 of file ConcatTestImpl.cpp.

2915 {
2916  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2917 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2282 of file ConcatTestImpl.cpp.

2286 {
2287  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2288 }

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2885 of file ConcatTestImpl.cpp.

2889 {
2890  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2891 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2317 of file ConcatTestImpl.cpp.

2321 {
2322  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
2323  tensorHandleFactory, 0.0f, 0);
2324 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2919 of file ConcatTestImpl.cpp.

2923 {
2924  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2925  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2926 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2290 of file ConcatTestImpl.cpp.

2294 {
2295  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2296 }

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2893 of file ConcatTestImpl.cpp.

2897 {
2898  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2899 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2326 of file ConcatTestImpl.cpp.

2331 {
2332  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2333  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.0f, 0);
2334 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2928 of file ConcatTestImpl.cpp.

2933 {
2934  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2935  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2936 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2298 of file ConcatTestImpl.cpp.

2303 {
2304  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
2305  useSubtensor, 0.0f, 0);
2306 }

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2901 of file ConcatTestImpl.cpp.

2906 {
2907  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2908  workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
2909 }

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2370 of file ConcatTestImpl.cpp.

2374 {
2375  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2376  tensorHandleFactory, 0.0f, 0);
2377 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2971 of file ConcatTestImpl.cpp.

2975 {
2976  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2977  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2978 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2379 of file ConcatTestImpl.cpp.

2383 {
2384  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2385  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2386 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2980 of file ConcatTestImpl.cpp.

2984 {
2985  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2986  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2987 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2388 of file ConcatTestImpl.cpp.

2392 {
2393  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2394  tensorHandleFactory, 0.0f, 0);
2395 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2989 of file ConcatTestImpl.cpp.

2993 {
2994  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
2995  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2996 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2397 of file ConcatTestImpl.cpp.

2402 {
2403  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2404  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, useSubtensor);
2405 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2998 of file ConcatTestImpl.cpp.

3003 {
3004  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
3005  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
3006 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2336 of file ConcatTestImpl.cpp.

2340 {
2341  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2342 }

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2938 of file ConcatTestImpl.cpp.

2942 {
2943  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2944 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2344 of file ConcatTestImpl.cpp.

2348 {
2349  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2350 }

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2946 of file ConcatTestImpl.cpp.

2950 {
2951  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2952 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2352 of file ConcatTestImpl.cpp.

2356 {
2357  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2358 }

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2954 of file ConcatTestImpl.cpp.

2958 {
2959  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
2960 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2360 of file ConcatTestImpl.cpp.

2365 {
2366  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager,
2367  tensorHandleFactory, 0.0f, 0, useSubtensor);
2368 }

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 2962 of file ConcatTestImpl.cpp.

2966 {
2967  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2968  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
2969 }

◆ ConcatBFloat16Test()

LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2415 of file ConcatTestImpl.cpp.

2419 {
2420  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2421 }

◆ ConcatDifferentInputOutputQParamInt16Test()

LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

Definition at line 1952 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and IWorkloadFactory::SupportsSubTensors().

1957 {
1958  IgnoreUnused(memoryManager);
1959 
1960  // Defines the tensor descriptors.
1961  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1962  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1963  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1964 
1965  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1966 
1967  // Quantized input1 tensor.
1968  const float inputScale1 = 0.5f;
1969  const int32_t inputOffset1 = 5;
1970 
1971  auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1972  {
1973  1, 2, 3,
1974  4, 5, 6,
1975  7, 8, 9,
1976  10, 11, 12,
1977  13, 14, 15,
1978  16, 17, 18,
1979 
1980  19, 20, 21,
1981  22, 23, 24,
1982  25, 26, 27,
1983  28, 29, 30,
1984  31, 32, 33,
1985  34, 35, 36
1986  }));
1987 
1988  // Quantized input2 tensor.
1989  const float inputScale2 = 0.2f;
1990  const int32_t inputOffset2 = 10;
1991 
1992  auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1993  {
1994  37, 38, 39,
1995  40, 41, 42,
1996  43, 44, 45,
1997  46, 47, 48,
1998  49, 50, 51,
1999  52, 53, 54
2000  }));
2001 
2002  // Quantized output tensor.
2003  const float outputScale = 0.1f;
2004  const int32_t outputOffset = 20;
2005 
2006  LayerTestResult<T, 3> ret(outputTensorInfo);
2007 
2008  ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
2009  {
2010  0, 5, 74,
2011  10, 15, 76,
2012  20, 25, 78,
2013  30, 35, 80,
2014  40, 45, 82,
2015  50, 55, 84,
2016 
2017  60, 65, 86,
2018  70, 75, 88,
2019  80, 85, 90,
2020  90, 95, 92,
2021  100, 105, 94,
2022  110, 115, 96,
2023 
2024  120, 125, 98,
2025  130, 135, 100,
2026  140, 145, 102,
2027  150, 155, 104,
2028  160, 165, 106,
2029  170, 175, 108
2030  }));
2031 
2032  outputTensorInfo.SetQuantizationScale(outputScale);
2033  outputTensorInfo.SetQuantizationOffset(outputOffset);
2034  inputTensorInfo1.SetQuantizationScale(inputScale1);
2035  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2036  inputTensorInfo2.SetQuantizationScale(inputScale2);
2037  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2038 
2039  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2040  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2041 
2042  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2043  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2044 
2045  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2046 
2047  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2048 
2049  std::unique_ptr<ITensorHandle> inputHandle1 =
2050  subTensorsSupported ?
2051  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2052  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2053 
2054  std::unique_ptr<ITensorHandle> inputHandle2 =
2055  subTensorsSupported ?
2056  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2057  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2058 
2059  ConcatQueueDescriptor data;
2060  OriginsDescriptor desc = CreateDescriptorForConcatenation(
2061  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2062  data.m_Parameters = desc;
2063 
2064  WorkloadInfo info;
2065  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2066  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2067  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2068 
2069  data.m_ViewOrigins.push_back(window1);
2070  data.m_ViewOrigins.push_back(window2);
2071 
2072  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2073 
2074  inputHandle1->Allocate();
2075  inputHandle2->Allocate();
2076  outputHandle->Allocate();
2077 
2078  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2079  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2080 
2081  workload->PostAllocationConfigure();
2082  workload->Execute();
2083 
2084  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2085 
2086  return ret;
2087 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
An OriginsDescriptor for the ConcatLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatDifferentInputOutputQParamUint8Test()

LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  useSubtensor 
)

◆ ConcatFloat16Test()

LayerTestResult<armnn::Half, 3> ConcatFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2407 of file ConcatTestImpl.cpp.

2411 {
2412  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
2413 }

◆ ConcatTest()

LayerTestResult<float, 3> ConcatTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2111 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, and IWorkloadFactory::SupportsSubTensors().

2115 {
2116  IgnoreUnused(memoryManager);
2117 
2118  unsigned int outputWidth = 3;
2119  unsigned int outputHeight = 6;
2120  unsigned int outputChannels = 3;
2121 
2122  unsigned int inputWidth1 = 3;
2123  unsigned int inputHeight1 = 6;
2124  unsigned int inputChannels1 = 2;
2125 
2126  unsigned int inputWidth2 = 3;
2127  unsigned int inputHeight2 = 6;
2128  unsigned int inputChannels2 = 1;
2129 
2130  // Define the tensor descriptors.
2131  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2132  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2133  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2134 
2135  LayerTestResult<float,3> ret(outputTensorInfo);
2136 
2137  ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2138  {
2139  1.0f, 2.0f, 3.0f,
2140  4.0f, 5.0f, 6.0f,
2141  7.0f, 8.0f, 9.0f,
2142  10.0f, 11.0f, 12.0f,
2143  13.0f, 14.0f, 15.0f,
2144  16.0f, 17.0f, 18.0f,
2145 
2146  19.0f, 20.0f, 21.0f,
2147  22.0f, 23.0f, 24.0f,
2148  25.0f, 26.0f, 27.0f,
2149  28.0f, 29.0f, 30.0f,
2150  31.0f, 32.0f, 33.0f,
2151  34.0f, 35.0f, 36.0f,
2152 
2153  37.0f, 38.0f, 39.0f,
2154  40.0f, 41.0f, 42.0f,
2155  43.0f, 44.0f, 45.0f,
2156  46.0f, 47.0f, 48.0f,
2157  49.0f, 50.0f, 51.0f,
2158  52.0f, 53.0f, 54.0f,
2159  })
2160  );
2161 
2162  auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2163  {
2164  1.0f, 2.0f, 3.0f,
2165  4.0f, 5.0f, 6.0f,
2166  7.0f, 8.0f, 9.0f,
2167  10.0f, 11.0f, 12.0f,
2168  13.0f, 14.0f, 15.0f,
2169  16.0f, 17.0f, 18.0f,
2170 
2171  19.0f, 20.0f, 21.0f,
2172  22.0f, 23.0f, 24.0f,
2173  25.0f, 26.0f, 27.0f,
2174  28.0f, 29.0f, 30.0f,
2175  31.0f, 32.0f, 33.0f,
2176  34.0f, 35.0f, 36.0f,
2177  })
2178  );
2179 
2180  auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2181  {
2182  37.0f, 38.0f, 39.0f,
2183  40.0f, 41.0f, 42.0f,
2184  43.0f, 44.0f, 45.0f,
2185  46.0f, 47.0f, 48.0f,
2186  49.0f, 50.0f, 51.0f,
2187  52.0f, 53.0f, 54.0f,
2188  })
2189  );
2190 
2191  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2192  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2193 
2194  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2195  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2196 
2197  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2198 
2199  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2200 
2201  std::unique_ptr<ITensorHandle> inputHandle1 =
2202  subTensorsSupported ?
2203  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2204  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2205 
2206  std::unique_ptr<ITensorHandle> inputHandle2 =
2207  subTensorsSupported ?
2208  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2209  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2210 
2211  ConcatQueueDescriptor data;
2213  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2214  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2215  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2216 
2217  data.m_ViewOrigins.push_back(window1);
2218  data.m_ViewOrigins.push_back(window2);
2219 
2220  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2221 
2222  inputHandle1->Allocate();
2223  inputHandle2->Allocate();
2224  outputHandle->Allocate();
2225 
2226  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2227  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2228 
2229  workload->PostAllocationConfigure();
2230  workload->Execute();
2231 
2232  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2233 
2234  return ret;
2235 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2707 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2711 {
2712  IgnoreUnused(memoryManager);
2713 
2714  unsigned int outputWidth = 3;
2715  unsigned int outputHeight = 6;
2716  unsigned int outputChannels = 3;
2717 
2718  unsigned int inputWidth1 = 3;
2719  unsigned int inputHeight1 = 6;
2720  unsigned int inputChannels1 = 2;
2721 
2722  unsigned int inputWidth2 = 3;
2723  unsigned int inputHeight2 = 6;
2724  unsigned int inputChannels2 = 1;
2725 
2726  // Defines the tensor descriptors.
2727  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2728  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2729  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2730 
2731  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2732  const float scale = 0.13497836f;
2733  const int32_t offset = -7;
2734 
2735  outputTensorInfo.SetQuantizationScale(scale);
2736  outputTensorInfo.SetQuantizationOffset(offset);
2737  inputTensorInfo1.SetQuantizationScale(scale);
2738  inputTensorInfo1.SetQuantizationOffset(offset);
2739  inputTensorInfo2.SetQuantizationScale(scale);
2740  inputTensorInfo2.SetQuantizationOffset(offset);
2741 
2742  LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2743 
2744  ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2745  {
2746  1, 2, 3,
2747  4, 5, 6,
2748  7, 8, 9,
2749  10, 11, 12,
2750  13, 14, 15,
2751  16, 17, 18,
2752 
2753  19, 20, 21,
2754  22, 23, 24,
2755  25, 26, 27,
2756  28, 29, 30,
2757  31, 32, 33,
2758  34, 35, 36,
2759 
2760  37, 38, 39,
2761  40, 41, 42,
2762  43, 44, 45,
2763  46, 47, 48,
2764  49, 50, 51,
2765  52, 53, 54,
2766  }));
2767 
2768  auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2769  {
2770  1, 2, 3,
2771  4, 5, 6,
2772  7, 8, 9,
2773  10, 11, 12,
2774  13, 14, 15,
2775  16, 17, 18,
2776 
2777  19, 20, 21,
2778  22, 23, 24,
2779  25, 26, 27,
2780  28, 29, 30,
2781  31, 32, 33,
2782  34, 35, 36,
2783  }));
2784 
2785  auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2786  {
2787  37, 38, 39,
2788  40, 41, 42,
2789  43, 44, 45,
2790  46, 47, 48,
2791  49, 50, 51,
2792  52, 53, 54,
2793  }));
2794 
2795  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2796  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2797 
2798  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2799  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2800 
2801 
2802  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2803 
2804  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2805 
2806  std::unique_ptr<ITensorHandle> inputHandle1 =
2807  subTensorsSupported ?
2808  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2809  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2810 
2811  std::unique_ptr<ITensorHandle> inputHandle2 =
2812  subTensorsSupported ?
2813  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2814  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2815 
2816 
2817  ConcatQueueDescriptor data;
2819  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2820  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2821  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2822 
2823  data.m_ViewOrigins.push_back(window1);
2824  data.m_ViewOrigins.push_back(window2);
2825 
2826  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2827 
2828  inputHandle1->Allocate();
2829  inputHandle2->Allocate();
2830  outputHandle->Allocate();
2831 
2832  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2833  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2834 
2835  workload->PostAllocationConfigure();
2836  workload->Execute();
2837 
2838  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2839 
2840  return ret;
2841 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2423 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, and IWorkloadFactory::SupportsSubTensors().

2427 {
2428  IgnoreUnused(memoryManager);
2429 
2430  unsigned int outputWidth = 3;
2431  unsigned int outputHeight = 6;
2432  unsigned int outputChannels = 3;
2433 
2434  unsigned int inputWidth1 = 3;
2435  unsigned int inputHeight1 = 6;
2436  unsigned int inputChannels1 = 2;
2437 
2438  unsigned int inputWidth2 = 3;
2439  unsigned int inputHeight2 = 6;
2440  unsigned int inputChannels2 = 1;
2441 
2442  // Defines the tensor descriptors.
2443  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2444  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2445  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2446 
2447  // Quantized input1 tensor. Range [-3, 1]
2448  const float inputScale1 = 0.015686f;
2449  const int32_t inputOffset1 = 192;
2450 
2451  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2452  {
2453  1, 2, 3,
2454  4, 5, 6,
2455  7, 8, 9,
2456  10, 11, 12,
2457  13, 14, 15,
2458  16, 17, 18,
2459 
2460  19, 20, 21,
2461  22, 23, 24,
2462  25, 26, 27,
2463  28, 29, 30,
2464  31, 32, 33,
2465  34, 35, 36,
2466  })
2467  );
2468 
2469  // Quantized input2 tensor. Range [-1, 4]
2470  const float inputScale2 = 0.019608f;
2471  const int32_t inputOffset2 = 50;
2472 
2473  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2474  {
2475  37, 38, 39,
2476  40, 41, 42,
2477  43, 44, 45,
2478  46, 47, 48,
2479  49, 50, 51,
2480  52, 53, 54,
2481  })
2482  );
2483 
2484  // Output has the same quantization parameters as input1,
2485  // so that only the requantization of input2 is required
2486  const float outputScale = 0.015686f;
2487  const int32_t outputOffset = 192;
2488 
2489  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2490 
2491  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2492  {
2493  1, 2, 3,
2494  4, 5, 6,
2495  7, 8, 9,
2496  10, 11, 12,
2497  13, 14, 15,
2498  16, 17, 18,
2499 
2500  19, 20, 21,
2501  22, 23, 24,
2502  25, 26, 27,
2503  28, 29, 30,
2504  31, 32, 33,
2505  34, 35, 36,
2506 
2507  176, 177, 178,
2508  179, 181, 182,
2509  183, 184, 186,
2510  187, 188, 189,
2511  191, 192, 193,
2512  195, 196, 197,
2513  })
2514  );
2515 
2516  outputTensorInfo.SetQuantizationScale(outputScale);
2517  outputTensorInfo.SetQuantizationOffset(outputOffset);
2518  inputTensorInfo1.SetQuantizationScale(inputScale1);
2519  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2520  inputTensorInfo2.SetQuantizationScale(inputScale2);
2521  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2522 
2523  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2524  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2525 
2526  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2527  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2528 
2529  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2530 
2531  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2532 
2533  std::unique_ptr<ITensorHandle> inputHandle1 =
2534  subTensorsSupported ?
2535  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2536  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2537 
2538  std::unique_ptr<ITensorHandle> inputHandle2 =
2539  subTensorsSupported ?
2540  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2541  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2542 
2543  ConcatQueueDescriptor data;
2545  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2546  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2547  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2548 
2549  data.m_ViewOrigins.push_back(window1);
2550  data.m_ViewOrigins.push_back(window2);
2551 
2552  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2553 
2554  inputHandle1->Allocate();
2555  inputHandle2->Allocate();
2556  outputHandle->Allocate();
2557 
2558  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2559  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2560 
2561  workload->PostAllocationConfigure();
2562  workload->Execute();
2563 
2564  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2565 
2566  return ret;
2567 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 2569 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), ITensorHandleFactory::CreateSubTensorHandle(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2573 {
2574  IgnoreUnused(memoryManager);
2575 
2576  unsigned int outputWidth = 3;
2577  unsigned int outputHeight = 6;
2578  unsigned int outputChannels = 3;
2579 
2580  unsigned int inputWidth1 = 3;
2581  unsigned int inputHeight1 = 6;
2582  unsigned int inputChannels1 = 2;
2583 
2584  unsigned int inputWidth2 = 3;
2585  unsigned int inputHeight2 = 6;
2586  unsigned int inputChannels2 = 1;
2587 
2588  // Defines the tensor descriptors.
2589  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2590  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2591  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2592 
2593  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2594  const float scale = 0.13497836f;
2595  const int32_t offset = -7;
2596 
2597  outputTensorInfo.SetQuantizationScale(scale);
2598  outputTensorInfo.SetQuantizationOffset(offset);
2599  inputTensorInfo1.SetQuantizationScale(scale);
2600  inputTensorInfo1.SetQuantizationOffset(offset);
2601  inputTensorInfo2.SetQuantizationScale(scale);
2602  inputTensorInfo2.SetQuantizationOffset(offset);
2603 
2604  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2605 
2606  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2607  {
2608  1, 2, 3,
2609  4, 5, 6,
2610  7, 8, 9,
2611  10, 11, 12,
2612  13, 14, 15,
2613  16, 17, 18,
2614 
2615  19, 20, 21,
2616  22, 23, 24,
2617  25, 26, 27,
2618  28, 29, 30,
2619  31, 32, 33,
2620  34, 35, 36,
2621 
2622  37, 38, 39,
2623  40, 41, 42,
2624  43, 44, 45,
2625  46, 47, 48,
2626  49, 50, 51,
2627  52, 53, 54,
2628  })
2629  );
2630 
2631  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2632  {
2633  1, 2, 3,
2634  4, 5, 6,
2635  7, 8, 9,
2636  10, 11, 12,
2637  13, 14, 15,
2638  16, 17, 18,
2639 
2640  19, 20, 21,
2641  22, 23, 24,
2642  25, 26, 27,
2643  28, 29, 30,
2644  31, 32, 33,
2645  34, 35, 36,
2646  })
2647  );
2648 
2649  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2650  {
2651  37, 38, 39,
2652  40, 41, 42,
2653  43, 44, 45,
2654  46, 47, 48,
2655  49, 50, 51,
2656  52, 53, 54,
2657  })
2658  );
2659 
2660  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2661  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2662 
2663  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2664  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2665 
2666  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2667 
2668  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2669 
2670  std::unique_ptr<ITensorHandle> inputHandle1 =
2671  subTensorsSupported ?
2672  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2673  tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
2674 
2675  std::unique_ptr<ITensorHandle> inputHandle2 =
2676  subTensorsSupported ?
2677  tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2678  tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
2679 
2680 
2681  ConcatQueueDescriptor data;
2683  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2684  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2685  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2686 
2687  data.m_ViewOrigins.push_back(window1);
2688  data.m_ViewOrigins.push_back(window2);
2689 
2690  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2691 
2692  inputHandle1->Allocate();
2693  inputHandle2->Allocate();
2694  outputHandle->Allocate();
2695 
2696  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2697  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2698 
2699  workload->PostAllocationConfigure();
2700  workload->Execute();
2701 
2702  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2703 
2704  return ret;
2705 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0