ArmNN
 20.05
ConcatTestImpl.cpp File Reference

Go to the source code of this file.

Functions

OriginsDescriptor CreateDescriptorForConcat (const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
 
bool NeedPermuteForConcat (const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
 
TensorShape ExpandTensorShapeTo3dForPermute (const TensorShape &inputShape)
 
void Generate3dPermuteVectorForConcat (unsigned int numDimensions, unsigned int &concatDim, std::pair< PermutationVector, PermutationVector > &permutations)
 
template<typename T >
void PermuteTensorData (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const PermutationVector &mappings, TensorInfo &inputTensorInfo, const T *inputData, std::vector< T > &outputData)
 
template<typename T >
void PermuteInputsForConcat (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, std::vector< TensorInfo > &inputTensorInfos, std::vector< T *> &inputData, std::vector< std::vector< T >> &inputDataStorage, PermutationVector &permuteVector, unsigned int &concatDim, TensorInfo &outputTensorInfo)
 
template<typename T >
void PermuteOutputForConcat (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &tensorInfo, const PermutationVector &permuteVector, std::unique_ptr< ITensorHandle > &&inputDataHandle, T *data)
 
template<typename T >
void Concatenate (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, std::initializer_list< const TensorInfo > inputTensorInfosOrig, std::initializer_list< T *> inputsOrig, const TensorInfo &outputTensorInfoOrig, T *output, unsigned int concatDim, bool useSubtensor)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 1 > Concat1dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &outputTensorInfo, unsigned int dimension, const float qScale, const int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim0DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim1DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &outputTensorInfo, unsigned int dimension, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim2TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim0DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim1DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim2DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &outputTensorInfo, unsigned int dimension, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim2TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim3TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, bool useSubtensor)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim2TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim3TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, bool useSubtensor)
 
template<DataType ArmnnType, typename T >
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
template LayerTestResult< ResolveType< DataType::QAsymmU8 >, 3 > ConcatDifferentInputOutputQParamTest< DataType::QAsymmU8 > (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
template LayerTestResult< ResolveType< DataType::QSymmS16 >, 3 > ConcatDifferentInputOutputQParamTest< DataType::QSymmS16 > (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 1 > Concat1dTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim2Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim3Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< Half, 3 > ConcatFloat16Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< BFloat16, 3 > ConcatBFloat16Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2197 of file ConcatTestImpl.cpp.

2200 {
2201  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2202 }

◆ Concat1dTestImpl()

LayerTestResult<T, 1> Concat1dTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 413 of file ConcatTestImpl.cpp.

418 {
419  TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
420 
421  auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
422  auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
423  auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));
424 
425  TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
426 
427  LayerTestResult<T, 1> result(outputTensorInfo);
428 
429  std::vector<T> output;
430  output.resize(outputTensorInfo.GetNumElements());
431  Concatenate<T>(workloadFactory, memoryManager,
432  { inputTensorInfo, inputTensorInfo, inputTensorInfo },
433  { input0.data(), input1.data(), input2.data() },
434  outputTensorInfo,
435  output.data(),
436  0,
437  true);
438 
439  result.output = MakeTensor<T, 1>(outputTensorInfo, output);
440  result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
441  {
442  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
443  },
444  qScale, qOffset));
445 
446  return result;
447 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2770 of file ConcatTestImpl.cpp.

2773 {
2774  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2775 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2218 of file ConcatTestImpl.cpp.

2221 {
2222  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2223 }

◆ Concat2dDim0DiffInputDimsTestImpl()

LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 569 of file ConcatTestImpl.cpp.

574 {
575  TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
576  auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
577  {
578  // Batch 0
579  1.0f, 2.0f, 3.0f,
580 
581  // Batch 1
582  10.0f, 11.0f, 12.0f,
583  },
584  qScale, qOffset));
585 
586  TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
587  auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
588  {
589  // Batch 0
590  4.0f, 5.0f, 6.0f,
591 
592  // Batch 1
593  13.0f, 14.0f, 15.0f,
594 
 595  // Batch 2
596  7.0f, 8.0f, 9.0f,
597  },
598  qScale, qOffset));
599 
600  TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
601  auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
602  {
 603  // Batch 0
604  16.0f, 17.0f, 18.0f,
605  },
606  qScale, qOffset));
607 
608  TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
609  LayerTestResult<T, 2> result(outputTensorInfo);
610 
611  std::vector<T> output;
612  output.resize(outputTensorInfo.GetNumElements());
613  Concatenate<T>(workloadFactory, memoryManager,
614  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
615  { input0.data(), input1.data(), input2.data() },
616  outputTensorInfo,
617  output.data(),
618  0,
619  true);
620 
621  result.output = MakeTensor<T, 2>(outputTensorInfo, output);
622  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
623  {
624  // Batch 0
625  1.0f, 2.0f, 3.0f,
626 
627  // Batch 1
628  10.0f, 11.0f, 12.0f,
629 
630  // Batch 2
631  4.0f, 5.0f, 6.0f,
632 
633  // Batch 3
634  13.0f, 14.0f, 15.0f,
635 
636  // Batch 4
637  7.0f, 8.0f, 9.0f,
638 
639  // Batch 5
640  16.0f, 17.0f, 18.0f,
641  },
642  qScale, qOffset));
643 
644  return result;
645 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2791 of file ConcatTestImpl.cpp.

2794 {
2795  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2796  workloadFactory, memoryManager, 0.5f, -1);
2797 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2204 of file ConcatTestImpl.cpp.

2207 {
2208  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2209 }

◆ Concat2dDim0TestImpl()

LayerTestResult<T, 2> Concat2dDim0TestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 507 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

512 {
513  TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
514 
515  LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
516  workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
517 
518  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
519  {
520  // Batch 0
521  1.0f, 2.0f, 3.0f,
522 
523  // Batch 1
524  10.0f, 11.0f, 12.0f,
525 
526  // Batch 2
527  4.0f, 5.0f, 6.0f,
528 
529  // Batch 3
530  13.0f, 14.0f, 15.0f,
531 
532  // Batch 4
533  7.0f, 8.0f, 9.0f,
534 
535  // Batch 5
536  16.0f, 17.0f, 18.0f,
537  },
538  qScale, qOffset));
539 
540  return result;
541 }
boost::multi_array< T, n > outputExpected

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2777 of file ConcatTestImpl.cpp.

2780 {
2781  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2782 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2225 of file ConcatTestImpl.cpp.

2228 {
2229  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2230 }

◆ Concat2dDim1DiffInputDimsTestImpl()

LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 648 of file ConcatTestImpl.cpp.

653 {
654  TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
655  auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
656  {
657  // Batch 0
658  1.0f, 2.0f, 3.0f,
659 
660  // Batch 1
661  10.0f, 11.0f, 12.0f,
662  },
663  qScale, qOffset));
664 
665  TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
666  auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
667  {
668  // Batch 0
669  4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
670 
671  // Batch 1
672  13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
673  },
674  qScale, qOffset));
675 
676  TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
677  auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
678  {
679  // Batch 0
680  9.0f,
681 
682  // Batch 1
683  18.0f
684  },
685  qScale, qOffset));
686 
687  TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
688  LayerTestResult<T, 2> result(outputTensorInfo);
689 
690  std::vector<T> output;
691  output.resize(outputTensorInfo.GetNumElements());
692  Concatenate<T>(workloadFactory, memoryManager,
693  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
694  { input0.data(), input1.data(), input2.data() },
695  outputTensorInfo,
696  output.data(),
697  1,
698  true);
699 
700  result.output = MakeTensor<T, 2>(outputTensorInfo, output);
701  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
702  {
703  // Batch 0
704  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
705 
706  // Batch 1
707  10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
708  },
709  qScale, qOffset));
710 
711  return result;
712 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2799 of file ConcatTestImpl.cpp.

2802 {
2803  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2804  workloadFactory, memoryManager, 0.5f, -1);
2805 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2211 of file ConcatTestImpl.cpp.

2214 {
2215  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2216 }

◆ Concat2dDim1TestImpl()

LayerTestResult<T, 2> Concat2dDim1TestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 544 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

549 {
550  TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
551 
552  LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
553  workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
554 
555  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
556  {
557  // Batch 0
558  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
559 
560  // Batch 1
561  10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
562  },
563  qScale, qOffset));
564 
565  return result;
566 }
boost::multi_array< T, n > outputExpected

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2784 of file ConcatTestImpl.cpp.

2787 {
2788  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2789 }

◆ Concat2dTestImpl()

LayerTestResult<T, 2> Concat2dTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const TensorInfo outputTensorInfo,
unsigned int  dimension,
const float  qScale,
const int32_t  qOffset 
)

Definition at line 450 of file ConcatTestImpl.cpp.

References TensorInfo::GetNumElements(), and LayerTestResult< T, n >::output.

457 {
458  TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
459 
460  auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
461  {
462  // Batch 0
463  1.0f, 2.0f, 3.0f,
464 
465  // Batch 1
466  10.0f, 11.0f, 12.0f,
467  },
468  qScale, qOffset));
469 
470  auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
471  {
472  // Batch 0
473  4.0f, 5.0f, 6.0f,
474 
475  // Batch 1
476  13.0f, 14.0f, 15.0f,
477  },
478  qScale, qOffset));
479 
480  auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
481  {
482  // Batch 0
483  7.0f, 8.0f, 9.0f,
484 
485  // Batch 1
486  16.0f, 17.0f, 18.0f,
487  },
488  qScale, qOffset));
489 
490  LayerTestResult<T, 2> result(outputTensorInfo);
491 
492  std::vector<T> output;
493  output.resize(outputTensorInfo.GetNumElements());
494  Concatenate<T>(workloadFactory, memoryManager,
495  { inputTensorInfo, inputTensorInfo, inputTensorInfo },
496  { input0.data(), input1.data(), input2.data() },
497  outputTensorInfo,
498  output.data(),
499  dimension,
500  true);
501 
502  result.output = MakeTensor<T, 2>(outputTensorInfo, output);
503  return result;
504 }
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2254 of file ConcatTestImpl.cpp.

2257 {
2258  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2259  workloadFactory, memoryManager, 0.0f, 0);
2260 }

◆ Concat3dDim0DiffInputDimsTestImpl()

LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 993 of file ConcatTestImpl.cpp.

998 {
999  TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
1000  auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
1001  {
1002  // Batch 0, Channel 0
1003  1.0f, 2.0f,
1004 
1005  // Batch 0, Channel 1
1006  3.0f, 4.0f,
1007 
1008  // Batch 0, Channel 2
1009  5.0f, 6.0f,
1010 
1011  // Batch 1, Channel 0
1012  19.0f, 20.0f,
1013 
1014  // Batch 1, Channel 1
1015  21.0f, 22.0f,
1016 
1017  // Batch 1, Channel 2
1018  23.0f, 24.0f
1019  },
1020  qScale, qOffset));
1021 
1022  TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
1023  auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
1024  {
1025  // Batch 0, Channel 0
1026  7.0f, 8.0f,
1027 
1028  // Batch 0, Channel 1
1029  9.0f, 10.0f,
1030 
1031  // Batch 0, Channel 2
1032  11.0f, 12.0f,
1033  },
1034  qScale, qOffset));
1035 
1036  TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
1037  auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
1038  {
1039  // Batch 0, Channel 0
1040  25.0f, 26.0f,
1041 
1042  // Batch 0, Channel 1
1043  27.0f, 28.0f,
1044 
1045  // Batch 0, Channel 2
1046  29.0f, 30.0f,
1047 
1048  // Batch 1, Channel 0
1049  13.0f, 14.0f,
1050 
1051  // Batch 1, Channel 1
1052  15.0f, 16.0f,
1053 
1054  // Batch 1, Channel 2
1055  17.0f, 18.0f,
1056 
1057  // Batch 2, Channel 0
1058  31.0f, 32.0f,
1059 
1060  // Batch 2, Channel 1
1061  33.0f, 34.0f,
1062 
1063  // Batch 2, Channel 2
1064  35.0f, 36.0f
1065  },
1066  qScale, qOffset));
1067 
1068  TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
1069  LayerTestResult<T, 3> result(outputTensorInfo);
1070 
1071  std::vector<T> output;
1072  output.resize(outputTensorInfo.GetNumElements());
1073  Concatenate<T>(workloadFactory, memoryManager,
1074  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
1075  { input0.data(), input1.data(), input2.data() },
1076  outputTensorInfo,
1077  output.data(),
1078  0,
1079  true);
1080 
1081  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
1082  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
1083  {
1084  // Batch 0, Channel 0
1085  1.0f, 2.0f,
1086 
1087  // Batch 0, Channel 1
1088  3.0f, 4.0f,
1089 
1090  // Batch 0, Channel 2
1091  5.0f, 6.0f,
1092 
1093  // Batch 1, Channel 0
1094  19.0f, 20.0f,
1095 
1096  // Batch 1, Channel 1
1097  21.0f, 22.0f,
1098 
1099  // Batch 1, Channel 2
1100  23.0f, 24.0f,
1101 
1102  // Batch 2, Channel 0
1103  7.0f, 8.0f,
1104 
1105  // Batch 2, Channel 1
1106  9.0f, 10.0f,
1107 
1108  // Batch 2, Channel 2
1109  11.0f, 12.0f,
1110 
1111  // Batch 3, Channel 0
1112  25.0f, 26.0f,
1113 
1114  // Batch 3, Channel 1
1115  27.0f, 28.0f,
1116 
1117  // Batch 3, Channel 2
1118  29.0f, 30.0f,
1119 
1120  // Batch 4, Channel 0
1121  13.0f, 14.0f,
1122 
1123  // Batch 4, Channel 1
1124  15.0f, 16.0f,
1125 
1126  // Batch 4, Channel 2
1127  17.0f, 18.0f,
1128 
1129  // Batch 5, Channel 0
1130  31.0f, 32.0f,
1131 
1132  // Batch 5, Channel 1
1133  33.0f, 34.0f,
1134 
1135  // Batch 5, Channel 2
1136  35.0f, 36.0f
1137  },
1138  qScale, qOffset));
1139 
1140  return result;
1141 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2830 of file ConcatTestImpl.cpp.

2833 {
2834  return Concat3dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2835 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2232 of file ConcatTestImpl.cpp.

2235 {
2236  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2237 }

◆ Concat3dDim0TestImpl()

LayerTestResult<T, 3> Concat3dDim0TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 809 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

814 {
815  TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
816 
817  LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
818  workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
819 
820  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
821  {
822  // Batch 0, Channel 0
823  1.0f, 2.0f,
824 
825  // Batch 0, Channel 1
826  3.0f, 4.0f,
827 
828  // Batch 0, Channel 2
829  5.0f, 6.0f,
830 
831  // Batch 1, Channel 0
832  19.0f, 20.0f,
833 
834  // Batch 1, Channel 1
835  21.0f, 22.0f,
836 
837  // Batch 1, Channel 2
838  23.0f, 24.0f,
839 
840  // Batch 2, Channel 0
841  7.0f, 8.0f,
842 
843  // Batch 2, Channel 1
844  9.0f, 10.0f,
845 
846  // Batch 2, Channel 2
847  11.0f, 12.0f,
848 
849  // Batch 3, Channel 0
850  25.0f, 26.0f,
851 
852  // Batch 3, Channel 1
853  27.0f, 28.0f,
854 
855  // Batch 3, Channel 2
856  29.0f, 30.0f,
857 
858  // Batch 4, Channel 0
859  13.0f, 14.0f,
860 
861  // Batch 4, Channel 1
862  15.0f, 16.0f,
863 
864  // Batch 4, Channel 2
865  17.0f, 18.0f,
866 
867  // Batch 5, Channel 0
868  31.0f, 32.0f,
869 
870  // Batch 5, Channel 1
871  33.0f, 34.0f,
872 
873  // Batch 5, Channel 2
874  35.0f, 36.0f
875  },
876  qScale, qOffset));
877 
878  return result;
879 }
boost::multi_array< T, n > outputExpected

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2807 of file ConcatTestImpl.cpp.

2810 {
2811  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2812 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2262 of file ConcatTestImpl.cpp.

2265 {
2266  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2267 }

◆ Concat3dDim1DiffInputDimsTestImpl()

LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1144 of file ConcatTestImpl.cpp.

1149 {
1150  TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
1151  auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
1152  {
1153  // Batch 0, Channel 0
1154  1.0f, 2.0f,
1155 
1156  // Batch 0, Channel 1
1157  3.0f, 4.0f,
1158 
1159  // Batch 0, Channel 2
1160  5.0f, 6.0f,
1161 
1162  // Batch 1, Channel 0
1163  19.0f, 20.0f,
1164 
1165  // Batch 1, Channel 1
1166  21.0f, 22.0f,
1167 
1168  // Batch 1, Channel 2
1169  23.0f, 24.0f
1170  },
1171  qScale, qOffset));
1172 
1173  TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
1174  auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
1175  {
1176  // Batch 0, Channel 0
1177  7.0f, 8.0f,
1178 
1179  // Batch 0, Channel 1
1180  9.0f, 10.0f,
1181 
1182  // Batch 0, Channel 2
1183  11.0f, 12.0f,
1184 
1185  // Batch 0, Channel 3
1186  25.0f, 26.0f,
1187 
1188  // Batch 1, Channel 0
1189  27.0f, 28.0f,
1190 
1191  // Batch 1, Channel 1
1192  29.0f, 30.0f,
1193 
1194  // Batch 1, Channel 2
1195  13.0f, 14.0f,
1196 
1197  // Batch 1, Channel 3
1198  15.0f, 16.0f,
1199  },
1200  qScale, qOffset));
1201 
1202  TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
1203  auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
1204  {
1205  // Batch 0, Channel 0
1206  17.0f, 18.0f,
1207 
1208  // Batch 1, Channel 0
1209  31.0f, 32.0f,
1210  },
1211  qScale, qOffset));
1212 
1213  TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
1214  LayerTestResult<T, 3> result(outputTensorInfo);
1215 
1216  std::vector<T> output;
1217  output.resize(outputTensorInfo.GetNumElements());
1218  Concatenate<T>(workloadFactory, memoryManager,
1219  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
1220  { input0.data(), input1.data(), input2.data() },
1221  outputTensorInfo,
1222  output.data(),
1223  1,
1224  true);
1225 
1226  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
1227  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
1228  {
1229  // Batch 0, Channel 0
1230  1.0f, 2.0f,
1231 
1232  // Batch 0, Channel 1
1233  3.0f, 4.0f,
1234 
1235  // Batch 0, Channel 2
1236  5.0f, 6.0f,
1237 
1238  // Batch 0, Channel 3
1239  7.0f, 8.0f,
1240 
1241  // Batch 0, Channel 4
1242  9.0f, 10.0f,
1243 
1244  // Batch 0, Channel 5
1245  11.0f, 12.0f,
1246 
1247  // Batch 0, Channel 6
1248  25.0f, 26.0f,
1249 
1250  // Batch 0, Channel 7
1251  17.0f, 18.0f,
1252 
1253  // Batch 1, Channel 0
1254  19.0f, 20.0f,
1255 
1256  // Batch 1, Channel 1
1257  21.0f, 22.0f,
1258 
1259  // Batch 1, Channel 2
1260  23.0f, 24.0f,
1261 
1262  // Batch 1, Channel 3
1263  27.0f, 28.0f,
1264 
1265  // Batch 1, Channel 4
1266  29.0f, 30.0f,
1267 
1268  // Batch 1, Channel 5
1269  13.0f, 14.0f,
1270 
1271  // Batch 1, Channel 6
1272  15.0f, 16.0f,
1273 
1274  // Batch 1, Channel 7
1275  31.0f, 32.0f,
1276  },
1277  qScale, qOffset));
1278 
1279  return result;
1280 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2837 of file ConcatTestImpl.cpp.

2840 {
2841  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2842  workloadFactory, memoryManager, 0.5f, -1);
2843 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2239 of file ConcatTestImpl.cpp.

2242 {
2243  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2244 }

◆ Concat3dDim1TestImpl()

LayerTestResult<T, 3> Concat3dDim1TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 882 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

887 {
888  TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
889 
890  LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
891  workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
892 
893  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
894  {
895  // Batch 0, Channel 0
896  1.0f, 2.0f,
897 
898  // Batch 0, Channel 1
899  3.0f, 4.0f,
900 
901  // Batch 0, Channel 2
902  5.0f, 6.0f,
903 
904  // Batch 0, Channel 3
905  7.0f, 8.0f,
906 
907  // Batch 0, Channel 4
908  9.0f, 10.0f,
909 
910  // Batch 0, Channel 5
911  11.0f, 12.0f,
912 
913  // Batch 0, Channel 6
914  13.0f, 14.0f,
915 
916  // Batch 0, Channel 7
917  15.0f, 16.0f,
918 
919  // Batch 0, Channel 8
920  17.0f, 18.0f,
921 
922  // Batch 1, Channel 0
923  19.0f, 20.0f,
924 
925  // Batch 1, Channel 1
926  21.0f, 22.0f,
927 
928  // Batch 1, Channel 2
929  23.0f, 24.0f,
930 
931  // Batch 1, Channel 3
932  25.0f, 26.0f,
933 
934  // Batch 1, Channel 4
935  27.0f, 28.0f,
936 
937  // Batch 1, Channel 5
938  29.0f, 30.0f,
939 
940  // Batch 1, Channel 6
941  31.0f, 32.0f,
942 
943  // Batch 1, Channel 7
944  33.0f, 34.0f,
945 
946  // Batch 1, Channel 8
947  35.0f, 36.0f
948  },
949  qScale, qOffset));
950 
951  return result;
952 }
boost::multi_array< T, n > outputExpected

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2814 of file ConcatTestImpl.cpp.

2817 {
2818  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2819 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor 
)

Definition at line 2269 of file ConcatTestImpl.cpp.

2273 {
2274  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2275  workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2276 }

◆ Concat3dDim2DiffInputDimsTestImpl()

LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 1283 of file ConcatTestImpl.cpp.

1289 {
1290  TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
1291  auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
1292  {
1293  // Batch 0, Channel 0
1294  1.0f, 2.0f,
1295 
1296  // Batch 0, Channel 1
1297  3.0f, 4.0f,
1298 
1299  // Batch 0, Channel 2
1300  5.0f, 6.0f,
1301 
1302  // Batch 1, Channel 0
1303  19.0f, 20.0f,
1304 
1305  // Batch 1, Channel 1
1306  21.0f, 22.0f,
1307 
1308  // Batch 1, Channel 2
1309  23.0f, 24.0f
1310  },
1311  qScale, qOffset));
1312 
1313  TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
1314  auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
1315  {
1316  // Batch 0, Channel 0
1317  7.0f,
1318 
1319  // Batch 0, Channel 1
1320  9.0f,
1321 
1322  // Batch 0, Channel 2
1323  11.0f,
1324 
1325  // Batch 1, Channel 0
1326  25.0f,
1327 
1328  // Batch 1, Channel 1
1329  27.0f,
1330 
1331  // Batch 1, Channel 2
1332  29.0f
1333  },
1334  qScale, qOffset));
1335 
1336  TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
1337  auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
1338  {
1339  // Batch 0, Channel 0
1340  13.0f, 14.0f, 50.0f,
1341 
1342  // Batch 0, Channel 1
1343  15.0f, 16.0f, 51.0f,
1344 
1345  // Batch 0, Channel 2
1346  17.0f, 18.0f, 52.0f,
1347 
1348  // Batch 1, Channel 0
1349  31.0f, 32.0f, 53.0f,
1350 
1351  // Batch 1, Channel 1
1352  33.0f, 34.0f, 54.0f,
1353 
1354  // Batch 1, Channel 2
1355  35.0f, 36.0f, 55.0f,
1356  },
1357  qScale, qOffset));
1358 
1359  TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
1360  LayerTestResult<T, 3> result(outputTensorInfo);
1361 
1362  std::vector<T> output;
1363  output.resize(outputTensorInfo.GetNumElements());
1364  Concatenate<T>(workloadFactory, memoryManager,
1365  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
1366  { input0.data(), input1.data(), input2.data() },
1367  outputTensorInfo,
1368  output.data(),
1369  2,
1370  useSubtensor);
1371 
1372  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
1373  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
1374  {
1375  // Batch 0, Channel 0
1376  1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
1377 
1378  // Batch 0, Channel 1
1379  3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
1380 
1381  // Batch 0, Channel 2
1382  5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
1383 
1384  // Batch 1, Channel 0
1385  19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
1386 
1387  // Batch 1, Channel 1
1388  21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
1389 
1390  // Batch 1, Channel 2
1391  23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
1392  },
1393  qScale, qOffset));
1394 
1395  return result;
1396 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor 
)

Definition at line 2845 of file ConcatTestImpl.cpp.

2849 {
2850  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2851  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2852 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor 
)

Definition at line 2246 of file ConcatTestImpl.cpp.

2250 {
2251  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2252 }

◆ Concat3dDim2TestImpl()

LayerTestResult<T, 3> Concat3dDim2TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 955 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

961 {
962  TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
963 
964  LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
965  workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
966 
967  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
968  {
969  // Batch 0, Channel 0
970  1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
971 
972  // Batch 0, Channel 1
973  3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
974 
975  // Batch 0, Channel 2
976  5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
977 
978  // Batch 1, Channel 0
979  19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
980 
981  // Batch 1, Channel 1
982  21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
983 
984  // Batch 1, Channel 2
985  23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
986  },
987  qScale, qOffset));
988 
989  return result;
990 }
boost::multi_array< T, n > outputExpected

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor 
)

Definition at line 2821 of file ConcatTestImpl.cpp.

2825 {
2826  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2827  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2828 }

◆ Concat3dTestImpl()

LayerTestResult<T, 3> Concat3dTestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const TensorInfo & outputTensorInfo,
unsigned int  dimension,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 715 of file ConcatTestImpl.cpp.

References TensorInfo::GetNumElements(), and LayerTestResult< T, n >::output.

723 {
724  TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
725 
726  auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
727  {
728  // Batch 0, Channel 0
729  1.0f, 2.0f,
730 
731  // Batch 0, Channel 1
732  3.0f, 4.0f,
733 
734  // Batch 0, Channel 2
735  5.0f, 6.0f,
736 
737  // Batch 1, Channel 0
738  19.0f, 20.0f,
739 
740  // Batch 1, Channel 1
741  21.0f, 22.0f,
742 
743  // Batch 1, Channel 2
744  23.0f, 24.0f
745  },
746  qScale, qOffset));
747 
748  auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
749  {
750  // Batch 0, Channel 0
751  7.0f, 8.0f,
752 
753  // Batch 0, Channel 1
754  9.0f, 10.0f,
755 
756  // Batch 0, Channel 2
757  11.0f, 12.0f,
758 
759  // Batch 1, Channel 0
760  25.0f, 26.0f,
761 
762  // Batch 1, Channel 1
763  27.0f, 28.0f,
764 
765  // Batch 1, Channel 2
766  29.0f, 30.0f
767  },
768  qScale, qOffset));
769 
770  auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
771  {
772  // Batch 0, Channel 0
773  13.0f, 14.0f,
774 
775  // Batch 0, Channel 1
776  15.0f, 16.0f,
777 
778  // Batch 0, Channel 2
779  17.0f, 18.0f,
780 
781  // Batch 1, Channel 0
782  31.0f, 32.0f,
783 
784  // Batch 1, Channel 1
785  33.0f, 34.0f,
786 
787  // Batch 1, Channel 2
788  35.0f, 36.0f
789  },
790  qScale, qOffset));
791 
792  LayerTestResult<T, 3> result(outputTensorInfo);
793 
794  std::vector<T> output;
795  output.resize(outputTensorInfo.GetNumElements());
796  Concatenate<T>(workloadFactory, memoryManager,
797  { inputTensorInfo, inputTensorInfo, inputTensorInfo },
798  { input0.data(), input1.data(), input2.data() },
799  outputTensorInfo,
800  output.data(),
801  dimension,
802  useSubtensor);
803 
804  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
805  return result;
806 }
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2307 of file ConcatTestImpl.cpp.

2310 {
2311  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2312 }

◆ Concat4dDiffShapeDim0TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1623 of file ConcatTestImpl.cpp.

1628 {
1629  constexpr unsigned int dimension = 0u;
1630 
1631  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1632  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1633  {
1634  1.0f, 2.0f,
1635  3.0f, 4.0f,
1636  5.0f, 6.0f,
1637  7.0f, 8.0f,
1638  9.0f, 10.0f,
1639  11.0f, 12.0f
1640  },
1641  qScale, qOffset));
1642 
1643  TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1644 
1645  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1646  {
1647  11.0f, 12.0f,
1648  13.0f, 14.0f,
1649  15.0f, 16.0f,
1650  17.0f, 18.0f,
1651  19.0f, 20.0f,
1652  21.0f, 22.0f,
1653 
1654  21.0f, 22.0f,
1655  23.0f, 24.0f,
1656  25.0f, 26.0f,
1657  27.0f, 28.0f,
1658  29.0f, 30.0f,
1659  31.0f, 32.0f
1660  },
1661  qScale, qOffset));
1662 
1663  TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1664 
1665  LayerTestResult<T, 4> result(outputTensorInfo);
1666 
1667  std::vector<T> output;
1668  output.resize(outputTensorInfo.GetNumElements());
1669  Concatenate<T>(workloadFactory,
1670  memoryManager,
1671  {inputTensorInfo0, inputTensorInfo1},
1672  {input0.data(), input1.data()},
1673  outputTensorInfo,
1674  output.data(),
1675  dimension,
1676  true);
1677 
1678  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1679  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1680  {
1681  1.0f, 2.0f,
1682  3.0f, 4.0f,
1683  5.0f, 6.0f,
1684  7.0f, 8.0f,
1685  9.0f, 10.0f,
1686  11.0f, 12.0f,
1687 
1688  11.0f, 12.0f,
1689  13.0f, 14.0f,
1690  15.0f, 16.0f,
1691  17.0f, 18.0f,
1692  19.0f, 20.0f,
1693  21.0f, 22.0f,
1694 
1695  21.0f, 22.0f,
1696  23.0f, 24.0f,
1697  25.0f, 26.0f,
1698  27.0f, 28.0f,
1699  29.0f, 30.0f,
1700  31.0f, 32.0f
1701  },
1702  qScale, qOffset));
1703 
1704  return result;
1705 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2883 of file ConcatTestImpl.cpp.

2886 {
2887  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2888  workloadFactory, memoryManager, 0.5f, -1);
2889 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2314 of file ConcatTestImpl.cpp.

2317 {
2318  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2319  workloadFactory, memoryManager, 0.0f, 0);
2320 }

◆ Concat4dDiffShapeDim1TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1708 of file ConcatTestImpl.cpp.

1713 {
1714  constexpr unsigned int dimension = 1u;
1715 
1716  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1717  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1718  {
1719  1.0f, 2.0f,
1720  3.0f, 4.0f,
1721  5.0f, 6.0f,
1722  7.0f, 8.0f,
1723  9.0f, 10.0f,
1724  11.0f, 12.0f
1725  },
1726  qScale, qOffset));
1727 
1728  TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
1729 
1730  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1731  {
1732  11.0f, 12.0f,
1733  13.0f, 14.0f,
1734  15.0f, 16.0f,
1735  17.0f, 18.0f,
1736  },
1737  qScale, qOffset));
1738 
1739  TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
1740 
1741  LayerTestResult<T, 4> result(outputTensorInfo);
1742 
1743  std::vector<T> output;
1744  output.resize(outputTensorInfo.GetNumElements());
1745  Concatenate<T>(workloadFactory,
1746  memoryManager,
1747  {inputTensorInfo0, inputTensorInfo1},
1748  {input0.data(), input1.data()},
1749  outputTensorInfo,
1750  output.data(),
1751  dimension,
1752  true);
1753 
1754  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1755  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1756  {
1757  1.0f, 2.0f,
1758  3.0f, 4.0f,
1759  5.0f, 6.0f,
1760  7.0f, 8.0f,
1761  9.0f, 10.0f,
1762  11.0f, 12.0f,
1763  11.0f, 12.0f,
1764  13.0f, 14.0f,
1765  15.0f, 16.0f,
1766  17.0f, 18.0f
1767  },
1768  qScale, qOffset));
1769 
1770  return result;
1771 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2891 of file ConcatTestImpl.cpp.

2894 {
2895  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2896  workloadFactory, memoryManager, 0.5f, -1);
2897 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2322 of file ConcatTestImpl.cpp.

2325 {
2326  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2327 }

◆ Concat4dDiffShapeDim2TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1774 of file ConcatTestImpl.cpp.

1779 {
1780  constexpr unsigned int dimension = 2u;
1781 
1782  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1783  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1784  {
1785  1.0f, 2.0f,
1786  3.0f, 4.0f,
1787  5.0f, 6.0f,
1788  7.0f, 8.0f,
1789  9.0f, 10.0f,
1790  11.0f, 12.0f
1791  },
1792  qScale, qOffset));
1793 
1794  TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
1795  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1796  {
1797  11.0f, 12.0f,
1798  13.0f, 14.0f,
1799  15.0f, 16.0f,
1800  17.0f, 18.0f,
1801  19.0f, 20.0f,
1802  21.0f, 22.0f,
1803  23.0f, 24.0f,
1804  25.0f, 26.0f,
1805  27.0f, 28.0f
1806  },
1807  qScale, qOffset));
1808 
1809  TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
1810  LayerTestResult<T, 4> result(outputTensorInfo);
1811 
1812  std::vector<T> output;
1813  output.resize(outputTensorInfo.GetNumElements());
1814  Concatenate<T>(workloadFactory,
1815  memoryManager,
1816  {inputTensorInfo0, inputTensorInfo1},
1817  {input0.data(), input1.data()},
1818  outputTensorInfo,
1819  output.data(),
1820  dimension,
1821  true);
1822 
1823  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1824  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1825  {
1826  1.0f, 2.0f,
1827  3.0f, 4.0f,
1828  11.0f, 12.0f,
1829  13.0f, 14.0f,
1830  15.0f, 16.0f,
1831 
1832  5.0f, 6.0f,
1833  7.0f, 8.0f,
1834  17.0f, 18.0f,
1835  19.0f, 20.0f,
1836  21.0f, 22.0f,
1837 
1838  9.0f, 10.0f,
1839  11.0f, 12.0f,
1840  23.0f, 24.0f,
1841  25.0f, 26.0f,
1842  27.0f, 28.0f
1843  },
1844  qScale, qOffset));
1845 
1846  return result;
1847 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2899 of file ConcatTestImpl.cpp.

2902 {
2903  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
2904  workloadFactory, memoryManager, 0.5f, -1);
2905 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor 
)

Definition at line 2329 of file ConcatTestImpl.cpp.

2333 {
2334  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2335  workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2336 }

◆ Concat4dDiffShapeDim3TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset,
bool  useSubtensor 
)

Definition at line 1850 of file ConcatTestImpl.cpp.

1856 {
1857  constexpr unsigned int dimension = 3u;
1858 
1859  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1860  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1861  {
1862  1.0f, 2.0f,
1863  3.0f, 4.0f,
1864  5.0f, 6.0f,
1865  7.0f, 8.0f,
1866  9.0f, 10.0f,
1867  11.0f, 12.0f
1868  },
1869  qScale, qOffset));
1870 
1871  TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
1872  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1873  {
1874  11.0f, 12.0f, 13.0f,
1875  14.0f, 15.0f, 16.0f,
1876 
1877  17.0f, 18.0f, 19.0f,
1878  20.0f, 21.0f, 22.0f,
1879 
1880  23.0f, 24.0f, 25.0f,
1881  26.0f, 27.0f, 28.0f
1882  },
1883  qScale, qOffset));
1884 
1885  TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
1886 
1887  LayerTestResult<T, 4> result(outputTensorInfo);
1888 
1889  std::vector<T> output;
1890  output.resize(outputTensorInfo.GetNumElements());
1891  Concatenate<T>(workloadFactory,
1892  memoryManager,
1893  {inputTensorInfo0, inputTensorInfo1},
1894  {input0.data(), input1.data()},
1895  outputTensorInfo,
1896  output.data(),
1897  dimension,
1898  useSubtensor);
1899 
1900  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1901  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1902  {
1903  1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
1904  3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
1905  5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
1906  7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
1907  9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
1908  11.0f, 12.0f, 26.0f, 27.0f, 28.0f
1909  },
1910  qScale, qOffset));
1911 
1912  return result;
1913 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool  useSubtensor 
)

Definition at line 2907 of file ConcatTestImpl.cpp.

2911 {
2912  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
2913  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2914 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2278 of file ConcatTestImpl.cpp.

2281 {
2282  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2283 }

◆ Concat4dDim0TestImpl()

LayerTestResult<T, 4> Concat4dDim0TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1462 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1467 {
1468  TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1469 
1470  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1471  workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
1472 
1473  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1474  {
1475  1.0f, 2.0f,
1476  3.0f, 4.0f,
1477  5.0f, 6.0f,
1478  7.0f, 8.0f,
1479  9.0f, 10.0f,
1480  11.0f, 12.0f,
1481 
1482  11.0f, 12.0f,
1483  13.0f, 14.0f,
1484  15.0f, 16.0f,
1485  17.0f, 18.0f,
1486  19.0f, 20.0f,
1487  21.0f, 22.0f,
1488 
1489  21.0f, 22.0f,
1490  23.0f, 24.0f,
1491  25.0f, 26.0f,
1492  27.0f, 28.0f,
1493  29.0f, 30.0f,
1494  31.0f, 32.0f
1495  },
1496  qScale, qOffset));
1497 
1498  return result;
1499 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2854 of file ConcatTestImpl.cpp.

2857 {
2858  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2859 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2285 of file ConcatTestImpl.cpp.

2288 {
2289  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2290 }

◆ Concat4dDim1TestImpl()

LayerTestResult<T, 4> Concat4dDim1TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1502 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1507 {
1508  TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
1509 
1510  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1511  workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
1512 
1513  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1514  {
1515  1.0f, 2.0f,
1516  3.0f, 4.0f,
1517  5.0f, 6.0f,
1518  7.0f, 8.0f,
1519  9.0f, 10.0f,
1520  11.0f, 12.0f,
1521 
1522  11.0f, 12.0f,
1523  13.0f, 14.0f,
1524  15.0f, 16.0f,
1525  17.0f, 18.0f,
1526  19.0f, 20.0f,
1527  21.0f, 22.0f,
1528 
1529  21.0f, 22.0f,
1530  23.0f, 24.0f,
1531  25.0f, 26.0f,
1532  27.0f, 28.0f,
1533  29.0f, 30.0f,
1534  31.0f, 32.0f
1535  },
1536  qScale, qOffset));
1537 
1538  return result;
1539 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2861 of file ConcatTestImpl.cpp.

2864 {
2865  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2866 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2292 of file ConcatTestImpl.cpp.

2295 {
2296  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2297 }

◆ Concat4dDim2TestImpl()

LayerTestResult<T, 4> Concat4dDim2TestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1542 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1547 {
1548  TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
1549 
1550  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1551  workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
1552 
1553  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1554  {
1555  1.0f, 2.0f,
1556  3.0f, 4.0f,
1557  11.0f, 12.0f,
1558  13.0f, 14.0f,
1559  21.0f, 22.0f,
1560  23.0f, 24.0f,
1561 
1562  5.0f, 6.0f,
1563  7.0f, 8.0f,
1564  15.0f, 16.0f,
1565  17.0f, 18.0f,
1566  25.0f, 26.0f,
1567  27.0f, 28.0f,
1568 
1569  9.0f, 10.0f,
1570  11.0f, 12.0f,
1571  19.0f, 20.0f,
1572  21.0f, 22.0f,
1573  29.0f, 30.0f,
1574  31.0f, 32.0f
1575  },
1576  qScale, qOffset));
1577 
1578  return result;
1579 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2868 of file ConcatTestImpl.cpp.

2871 {
2872  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2873 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2299 of file ConcatTestImpl.cpp.

2303 {
2304  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2305 }

◆ Concat4dDim3TestImpl()

LayerTestResult<T, 4> Concat4dDim3TestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset,
bool  useSubtensor 
)

Definition at line 1582 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1588 {
1589  TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
1590 
1591  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1592  workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
1593 
1594  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1595  {
1596  1.0f, 2.0f,
1597  11.0f, 12.0f,
1598  21.0f, 22.0f,
1599  3.0f, 4.0f,
1600  13.0f, 14.0f,
1601  23.0f, 24.0f,
1602 
1603  5.0f, 6.0f,
1604  15.0f, 16.0f,
1605  25.0f, 26.0f,
1606  7.0f, 8.0f,
1607  17.0f, 18.0f,
1608  27.0f, 28.0f,
1609 
1610  9.0f, 10.0f,
1611  19.0f, 20.0f,
1612  29.0f, 30.0f,
1613  11.0f, 12.0f,
1614  21.0f, 22.0f,
1615  31.0f, 32.0f
1616  },
1617  qScale, qOffset));
1618 
1619  return result;
1620 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2875 of file ConcatTestImpl.cpp.

2878 {
2879  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2880  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2881 }

◆ Concat4dTestImpl()

LayerTestResult<T, 4> Concat4dTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const TensorInfo outputTensorInfo,
unsigned int  dimension,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 1399 of file ConcatTestImpl.cpp.

References TensorInfo::GetNumElements(), and LayerTestResult< T, n >::output.

1407 {
1408  TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1409 
1410  auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1411  {
1412  1.0f, 2.0f,
1413  3.0f, 4.0f,
1414  5.0f, 6.0f,
1415  7.0f, 8.0f,
1416  9.0f, 10.0f,
1417  11.0f, 12.0f
1418  },
1419  qScale, qOffset));
1420 
1421  auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1422  {
1423  11.0f, 12.0f,
1424  13.0f, 14.0f,
1425  15.0f, 16.0f,
1426  17.0f, 18.0f,
1427  19.0f, 20.0f,
1428  21.0f, 22.0f
1429  },
1430  qScale, qOffset));
1431 
1432  auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1433  {
1434  21.0f, 22.0f,
1435  23.0f, 24.0f,
1436  25.0f, 26.0f,
1437  27.0f, 28.0f,
1438  29.0f, 30.0f,
1439  31.0f, 32.0f
1440  },
1441  qScale, qOffset));
1442 
1443  LayerTestResult<T, 4> result(outputTensorInfo);
1444 
1445  std::vector<T> output;
1446  output.resize(outputTensorInfo.GetNumElements());
1447 
1448  Concatenate<T>(workloadFactory,
1449  memoryManager,
1450  {inputTensorInfo, inputTensorInfo, inputTensorInfo},
1451  {input0.data(), input1.data(), input2.data()},
1452  outputTensorInfo,
1453  output.data(),
1454  dimension,
1455  useSubtensor);
1456 
1457  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1458  return result;
1459 }
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ ConcatBFloat16Test()

LayerTestResult<BFloat16, 3> ConcatBFloat16Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2345 of file ConcatTestImpl.cpp.

2348 {
2349  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
2350 }

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 1916 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and IWorkloadFactory::SupportsSubTensors().

1920 {
1921  IgnoreUnused(memoryManager);
1922 
1923  // Defines the tensor descriptors.
1924  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1925  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1926  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1927 
1928  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1929 
1930  // Quantized input1 tensor.
1931  const float inputScale1 = 0.5f;
1932  const int32_t inputOffset1 = 5;
1933 
1934  auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1935  {
1936  1, 2, 3,
1937  4, 5, 6,
1938  7, 8, 9,
1939  10, 11, 12,
1940  13, 14, 15,
1941  16, 17, 18,
1942 
1943  19, 20, 21,
1944  22, 23, 24,
1945  25, 26, 27,
1946  28, 29, 30,
1947  31, 32, 33,
1948  34, 35, 36
1949  }));
1950 
 1951  // Quantized input2 tensor.
1952  const float inputScale2 = 0.2f;
1953  const int32_t inputOffset2 = 10;
1954 
1955  auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1956  {
1957  37, 38, 39,
1958  40, 41, 42,
1959  43, 44, 45,
1960  46, 47, 48,
1961  49, 50, 51,
1962  52, 53, 54
1963  }));
1964 
1965  // Quantized output tensor.
1966  const float outputScale = 0.1f;
1967  const int32_t outputOffset = 20;
1968 
1969  LayerTestResult<T, 3> ret(outputTensorInfo);
1970 
1971  ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1972  {
1973  0, 5, 74,
1974  10, 15, 76,
1975  20, 25, 78,
1976  30, 35, 80,
1977  40, 45, 82,
1978  50, 55, 84,
1979 
1980  60, 65, 86,
1981  70, 75, 88,
1982  80, 85, 90,
1983  90, 95, 92,
1984  100, 105, 94,
1985  110, 115, 96,
1986 
1987  120, 125, 98,
1988  130, 135, 100,
1989  140, 145, 102,
1990  150, 155, 104,
1991  160, 165, 106,
1992  170, 175, 108
1993  }));
1994 
1995  outputTensorInfo.SetQuantizationScale(outputScale);
1996  outputTensorInfo.SetQuantizationOffset(outputOffset);
1997  inputTensorInfo1.SetQuantizationScale(inputScale1);
1998  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
1999  inputTensorInfo2.SetQuantizationScale(inputScale2);
2000  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2001 
2002  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2003  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2004 
2005  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2006  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2007 
2008  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2009 
2010  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2011 
2012  std::unique_ptr<ITensorHandle> inputHandle1 =
2013  subTensorsSupported ?
2014  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2015  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2016 
2017  std::unique_ptr<ITensorHandle> inputHandle2 =
2018  subTensorsSupported ?
2019  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2020  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2021 
 2022  ConcatQueueDescriptor data;
 2023  OriginsDescriptor desc = CreateDescriptorForConcatenation(
 2024  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
 2025  data.m_Parameters = desc;
2026 
 2027  WorkloadInfo info;
 2028  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2029  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2030  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2031 
2032  data.m_ViewOrigins.push_back(window1);
2033  data.m_ViewOrigins.push_back(window2);
2034 
2035  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2036 
2037  inputHandle1->Allocate();
2038  inputHandle2->Allocate();
2039  outputHandle->Allocate();
2040 
2041  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2042  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2043 
2044  workload->PostAllocationConfigure();
2045  workload->Execute();
2046 
2047  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2048 
2049  return ret;
2050 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
An OriginsDescriptor for the ConcatLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatDifferentInputOutputQParamTest< DataType::QAsymmU8 >()

template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3> ConcatDifferentInputOutputQParamTest< DataType::QAsymmU8 > ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest< DataType::QSymmS16 >()

template LayerTestResult<ResolveType<DataType::QSymmS16>, 3> ConcatDifferentInputOutputQParamTest< DataType::QSymmS16 > ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ Concatenate()

void Concatenate ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
std::initializer_list< const TensorInfo inputTensorInfosOrig,
std::initializer_list< T *>  inputsOrig,
const TensorInfo outputTensorInfoOrig,
T *  output,
unsigned int  concatDim,
bool  useSubtensor 
)

Definition at line 272 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), CreateDescriptorForConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), OriginsDescriptor::GetNumDimensions(), OriginsDescriptor::GetNumViews(), TensorInfo::GetShape(), OriginsDescriptor::GetViewOrigin(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, ConcatQueueDescriptor::m_ViewOrigins, NeedPermuteForConcat(), and IWorkloadFactory::SupportsSubTensors().

281 {
282  ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
283  if (output == nullptr)
284  {
285  // Nullptr is an error in the test. By returning without doing the permutation
286  // I expect the caller to fail the test. It still makes sense to report this as
287  // an assert for Debug builds.
288  return;
289  }
290 
291  // Saves a copy of the parameters which we might need to change.
292  std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
293  std::vector<T *> inputs = inputsOrig;
294  TensorInfo outputTensorInfo = outputTensorInfoOrig;
295 
296  PermutationVector permuteVector{0, 1, 2};
297 
298  // Holds and automatically releases memory for the reshaped input data.
299  std::vector<std::vector<T>> tmpInputDataStorage;
300 
301  const size_t inputCount = inputTensorInfos.size();
302 
303  bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
304 
305  if (needPermuteForConcat)
306  {
307  //
308  // We need to permute the inputs, because concatenation along
309  // the requested axis is not supported.
310  //
311  PermuteInputsForConcat<T>(workloadFactory,
312  memoryManager,
313  inputTensorInfos,
314  inputs,
315  tmpInputDataStorage,
316  permuteVector,
317  concatDim,
318  outputTensorInfo);
319  }
320 
321  WorkloadInfo workloadInfo;
322 
323  std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
324  inputHandles.reserve(inputCount);
325 
326  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
327 
328  ConcatQueueDescriptor queueDescriptor;
329  OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
330  queueDescriptor.m_Parameters = viewsDescriptor;
331 
332  if (useSubtensor)
333  {
334  queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
335  for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
336  {
337  queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
338  viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
339  }
340 
341  outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
342 
343  const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
344  for (unsigned int i = 0; i < inputCount; ++i)
345  {
346  const TensorInfo& inputTensorInfo = inputTensorInfos[i];
347  std::unique_ptr<ITensorHandle> inputHandle =
348  subTensorsSupported ?
349  workloadFactory.CreateSubTensorHandle(*outputHandle,
350  inputTensorInfo.GetShape(),
351  queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
352  workloadFactory.CreateTensorHandle(inputTensorInfo);
353 
354  inputHandles.emplace_back(std::move(inputHandle));
355  }
356 
357  }
358  else
359  {
360  for (unsigned int i = 0; i < inputCount; ++i)
361  {
362  std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
363  inputHandles.emplace_back(std::move(inputHandle));
364  }
365  }
366 
367  for (unsigned int i = 0; i < inputCount; ++i)
368  {
369  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
370  }
371 
372  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
373 
374  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
375 
376  for (auto& inputHandle : inputHandles)
377  {
378  inputHandle->Allocate();
379  }
380 
381  outputHandle->Allocate();
382 
383  unsigned int nextInputId = 0;
384  for (auto& inputHandle : inputHandles)
385  {
386  CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
387  ++nextInputId;
388  }
389 
390  workload->PostAllocationConfigure();
391  workload->Execute();
392 
393  if (needPermuteForConcat)
394  {
395  PermuteOutputForConcat<T>(workloadFactory,
396  memoryManager,
397  outputTensorInfo,
398  permuteVector,
399  std::move(outputHandle),
400  output);
401  }
402  else
403  {
404  CopyDataFromITensorHandle(output, outputHandle.get());
405  }
406 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
std::vector< ViewOrigin > m_ViewOrigins
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
An OriginsDescriptor for the ConcatLayer.
OriginsDescriptor CreateDescriptorForConcat(const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
uint32_t GetNumDimensions() const
Get the number of dimensions.
Contains information about inputs and outputs to a layer.
uint32_t GetNumViews() const
Get the number of views.
bool NeedPermuteForConcat(const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatFloat16Test()

LayerTestResult<Half, 3> ConcatFloat16Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2338 of file ConcatTestImpl.cpp.

2341 {
2342  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
2343 }

◆ ConcatTest()

LayerTestResult<float,3> ConcatTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2072 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, and IWorkloadFactory::SupportsSubTensors().

2075 {
2076  IgnoreUnused(memoryManager);
2077 
2078  unsigned int outputWidth = 3;
2079  unsigned int outputHeight = 6;
2080  unsigned int outputChannels = 3;
2081 
2082  unsigned int inputWidth1 = 3;
2083  unsigned int inputHeight1 = 6;
2084  unsigned int inputChannels1 = 2;
2085 
2086  unsigned int inputWidth2 = 3;
2087  unsigned int inputHeight2 = 6;
2088  unsigned int inputChannels2 = 1;
2089 
2090  // Define the tensor descriptors.
2091  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2092  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2093  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2094 
2095  LayerTestResult<float,3> ret(outputTensorInfo);
2096 
2097  ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2098  {
2099  1.0f, 2.0f, 3.0f,
2100  4.0f, 5.0f, 6.0f,
2101  7.0f, 8.0f, 9.0f,
2102  10.0f, 11.0f, 12.0f,
2103  13.0f, 14.0f, 15.0f,
2104  16.0f, 17.0f, 18.0f,
2105 
2106  19.0f, 20.0f, 21.0f,
2107  22.0f, 23.0f, 24.0f,
2108  25.0f, 26.0f, 27.0f,
2109  28.0f, 29.0f, 30.0f,
2110  31.0f, 32.0f, 33.0f,
2111  34.0f, 35.0f, 36.0f,
2112 
2113  37.0f, 38.0f, 39.0f,
2114  40.0f, 41.0f, 42.0f,
2115  43.0f, 44.0f, 45.0f,
2116  46.0f, 47.0f, 48.0f,
2117  49.0f, 50.0f, 51.0f,
2118  52.0f, 53.0f, 54.0f,
2119  })
2120  );
2121 
2122  auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2123  {
2124  1.0f, 2.0f, 3.0f,
2125  4.0f, 5.0f, 6.0f,
2126  7.0f, 8.0f, 9.0f,
2127  10.0f, 11.0f, 12.0f,
2128  13.0f, 14.0f, 15.0f,
2129  16.0f, 17.0f, 18.0f,
2130 
2131  19.0f, 20.0f, 21.0f,
2132  22.0f, 23.0f, 24.0f,
2133  25.0f, 26.0f, 27.0f,
2134  28.0f, 29.0f, 30.0f,
2135  31.0f, 32.0f, 33.0f,
2136  34.0f, 35.0f, 36.0f,
2137  })
2138  );
2139 
2140  auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2141  {
2142  37.0f, 38.0f, 39.0f,
2143  40.0f, 41.0f, 42.0f,
2144  43.0f, 44.0f, 45.0f,
2145  46.0f, 47.0f, 48.0f,
2146  49.0f, 50.0f, 51.0f,
2147  52.0f, 53.0f, 54.0f,
2148  })
2149  );
2150 
2151  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2152  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2153 
2154  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2155  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2156 
2157  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2158 
2159  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2160 
2161  std::unique_ptr<ITensorHandle> inputHandle1 =
2162  subTensorsSupported ?
2163  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2164  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2165 
2166  std::unique_ptr<ITensorHandle> inputHandle2 =
2167  subTensorsSupported ?
2168  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2169  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2170 
2171  ConcatQueueDescriptor data;
 2172  WorkloadInfo info;
 2173  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2174  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2175  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2176 
2177  data.m_ViewOrigins.push_back(window1);
2178  data.m_ViewOrigins.push_back(window2);
2179 
2180  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2181 
2182  inputHandle1->Allocate();
2183  inputHandle2->Allocate();
2184  outputHandle->Allocate();
2185 
2186  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2187  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2188 
2189  workload->PostAllocationConfigure();
2190  workload->Execute();
2191 
2192  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2193 
2194  return ret;
2195 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2635 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2638 {
2639  IgnoreUnused(memoryManager);
2640 
2641  unsigned int outputWidth = 3;
2642  unsigned int outputHeight = 6;
2643  unsigned int outputChannels = 3;
2644 
2645  unsigned int inputWidth1 = 3;
2646  unsigned int inputHeight1 = 6;
2647  unsigned int inputChannels1 = 2;
2648 
2649  unsigned int inputWidth2 = 3;
2650  unsigned int inputHeight2 = 6;
2651  unsigned int inputChannels2 = 1;
2652 
2653  // Defines the tensor descriptors.
2654  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2655  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2656  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2657 
2658  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2659  const float scale = 0.13497836f;
2660  const int32_t offset = -7;
2661 
2662  outputTensorInfo.SetQuantizationScale(scale);
2663  outputTensorInfo.SetQuantizationOffset(offset);
2664  inputTensorInfo1.SetQuantizationScale(scale);
2665  inputTensorInfo1.SetQuantizationOffset(offset);
2666  inputTensorInfo2.SetQuantizationScale(scale);
2667  inputTensorInfo2.SetQuantizationOffset(offset);
2668 
2669  LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2670 
2671  ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2672  {
2673  1, 2, 3,
2674  4, 5, 6,
2675  7, 8, 9,
2676  10, 11, 12,
2677  13, 14, 15,
2678  16, 17, 18,
2679 
2680  19, 20, 21,
2681  22, 23, 24,
2682  25, 26, 27,
2683  28, 29, 30,
2684  31, 32, 33,
2685  34, 35, 36,
2686 
2687  37, 38, 39,
2688  40, 41, 42,
2689  43, 44, 45,
2690  46, 47, 48,
2691  49, 50, 51,
2692  52, 53, 54,
2693  }));
2694 
2695  auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2696  {
2697  1, 2, 3,
2698  4, 5, 6,
2699  7, 8, 9,
2700  10, 11, 12,
2701  13, 14, 15,
2702  16, 17, 18,
2703 
2704  19, 20, 21,
2705  22, 23, 24,
2706  25, 26, 27,
2707  28, 29, 30,
2708  31, 32, 33,
2709  34, 35, 36,
2710  }));
2711 
2712  auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2713  {
2714  37, 38, 39,
2715  40, 41, 42,
2716  43, 44, 45,
2717  46, 47, 48,
2718  49, 50, 51,
2719  52, 53, 54,
2720  }));
2721 
2722  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2723  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2724 
2725  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2726  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2727 
2728 
2729  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2730 
2731  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2732 
2733  std::unique_ptr<ITensorHandle> inputHandle1 =
2734  subTensorsSupported ?
2735  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2736  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2737 
2738  std::unique_ptr<ITensorHandle> inputHandle2 =
2739  subTensorsSupported ?
2740  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2741  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2742 
2743 
2744  ConcatQueueDescriptor data;
 2745  WorkloadInfo info;
 2746  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2747  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2748  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2749 
2750  data.m_ViewOrigins.push_back(window1);
2751  data.m_ViewOrigins.push_back(window2);
2752 
2753  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2754 
2755  inputHandle1->Allocate();
2756  inputHandle2->Allocate();
2757  outputHandle->Allocate();
2758 
2759  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2760  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2761 
2762  workload->PostAllocationConfigure();
2763  workload->Execute();
2764 
2765  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2766 
2767  return ret;
2768 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2352 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, and IWorkloadFactory::SupportsSubTensors().

2355 {
2356  IgnoreUnused(memoryManager);
2357 
2358  unsigned int outputWidth = 3;
2359  unsigned int outputHeight = 6;
2360  unsigned int outputChannels = 3;
2361 
2362  unsigned int inputWidth1 = 3;
2363  unsigned int inputHeight1 = 6;
2364  unsigned int inputChannels1 = 2;
2365 
2366  unsigned int inputWidth2 = 3;
2367  unsigned int inputHeight2 = 6;
2368  unsigned int inputChannels2 = 1;
2369 
2370  // Defines the tensor descriptors.
2371  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2372  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2373  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2374 
2375  // Quantized input1 tensor. Range [-3, 1]
2376  const float inputScale1 = 0.015686f;
2377  const int32_t inputOffset1 = 192;
2378 
2379  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2380  {
2381  1, 2, 3,
2382  4, 5, 6,
2383  7, 8, 9,
2384  10, 11, 12,
2385  13, 14, 15,
2386  16, 17, 18,
2387 
2388  19, 20, 21,
2389  22, 23, 24,
2390  25, 26, 27,
2391  28, 29, 30,
2392  31, 32, 33,
2393  34, 35, 36,
2394  })
2395  );
2396 
2397  // Quantized input2 tensor. Range [-1, 4]
2398  const float inputScale2 = 0.019608f;
2399  const int32_t inputOffset2 = 50;
2400 
2401  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2402  {
2403  37, 38, 39,
2404  40, 41, 42,
2405  43, 44, 45,
2406  46, 47, 48,
2407  49, 50, 51,
2408  52, 53, 54,
2409  })
2410  );
2411 
2412  // Output has the same quantization parameters as input1,
2413  // so that only the requantization of input2 is required
2414  const float outputScale = 0.015686f;
2415  const int32_t outputOffset = 192;
2416 
2417  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2418 
2419  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2420  {
2421  1, 2, 3,
2422  4, 5, 6,
2423  7, 8, 9,
2424  10, 11, 12,
2425  13, 14, 15,
2426  16, 17, 18,
2427 
2428  19, 20, 21,
2429  22, 23, 24,
2430  25, 26, 27,
2431  28, 29, 30,
2432  31, 32, 33,
2433  34, 35, 36,
2434 
2435  176, 177, 178,
2436  179, 181, 182,
2437  183, 184, 186,
2438  187, 188, 189,
2439  191, 192, 193,
2440  195, 196, 197,
2441  })
2442  );
2443 
2444  outputTensorInfo.SetQuantizationScale(outputScale);
2445  outputTensorInfo.SetQuantizationOffset(outputOffset);
2446  inputTensorInfo1.SetQuantizationScale(inputScale1);
2447  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2448  inputTensorInfo2.SetQuantizationScale(inputScale2);
2449  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2450 
2451  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2452  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2453 
2454  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2455  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2456 
2457  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2458 
2459  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2460 
2461  std::unique_ptr<ITensorHandle> inputHandle1 =
2462  subTensorsSupported ?
2463  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2464  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2465 
2466  std::unique_ptr<ITensorHandle> inputHandle2 =
2467  subTensorsSupported ?
2468  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2469  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2470 
2471  ConcatQueueDescriptor data;
2473  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2474  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2475  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2476 
2477  data.m_ViewOrigins.push_back(window1);
2478  data.m_ViewOrigins.push_back(window2);
2479 
2480  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2481 
2482  inputHandle1->Allocate();
2483  inputHandle2->Allocate();
2484  outputHandle->Allocate();
2485 
2486  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2487  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2488 
2489  workload->PostAllocationConfigure();
2490  workload->Execute();
2491 
2492  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2493 
2494  return ret;
2495 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2497 of file ConcatTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, ConcatQueueDescriptor::m_ViewOrigins, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2500 {
2501  IgnoreUnused(memoryManager);
2502 
2503  unsigned int outputWidth = 3;
2504  unsigned int outputHeight = 6;
2505  unsigned int outputChannels = 3;
2506 
2507  unsigned int inputWidth1 = 3;
2508  unsigned int inputHeight1 = 6;
2509  unsigned int inputChannels1 = 2;
2510 
2511  unsigned int inputWidth2 = 3;
2512  unsigned int inputHeight2 = 6;
2513  unsigned int inputChannels2 = 1;
2514 
2515  // Defines the tensor descriptors.
2516  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2517  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2518  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2519 
2520  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2521  const float scale = 0.13497836f;
2522  const int32_t offset = -7;
2523 
2524  outputTensorInfo.SetQuantizationScale(scale);
2525  outputTensorInfo.SetQuantizationOffset(offset);
2526  inputTensorInfo1.SetQuantizationScale(scale);
2527  inputTensorInfo1.SetQuantizationOffset(offset);
2528  inputTensorInfo2.SetQuantizationScale(scale);
2529  inputTensorInfo2.SetQuantizationOffset(offset);
2530 
2531  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2532 
2533  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2534  {
2535  1, 2, 3,
2536  4, 5, 6,
2537  7, 8, 9,
2538  10, 11, 12,
2539  13, 14, 15,
2540  16, 17, 18,
2541 
2542  19, 20, 21,
2543  22, 23, 24,
2544  25, 26, 27,
2545  28, 29, 30,
2546  31, 32, 33,
2547  34, 35, 36,
2548 
2549  37, 38, 39,
2550  40, 41, 42,
2551  43, 44, 45,
2552  46, 47, 48,
2553  49, 50, 51,
2554  52, 53, 54,
2555  })
2556  );
2557 
2558  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2559  {
2560  1, 2, 3,
2561  4, 5, 6,
2562  7, 8, 9,
2563  10, 11, 12,
2564  13, 14, 15,
2565  16, 17, 18,
2566 
2567  19, 20, 21,
2568  22, 23, 24,
2569  25, 26, 27,
2570  28, 29, 30,
2571  31, 32, 33,
2572  34, 35, 36,
2573  })
2574  );
2575 
2576  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2577  {
2578  37, 38, 39,
2579  40, 41, 42,
2580  43, 44, 45,
2581  46, 47, 48,
2582  49, 50, 51,
2583  52, 53, 54,
2584  })
2585  );
2586 
2587  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2588  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2589 
2590  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2591  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2592 
2593 
2594  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2595 
2596  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2597 
2598  std::unique_ptr<ITensorHandle> inputHandle1 =
2599  subTensorsSupported ?
2600  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2601  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2602 
2603  std::unique_ptr<ITensorHandle> inputHandle2 =
2604  subTensorsSupported ?
2605  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2606  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2607 
2608 
2609  ConcatQueueDescriptor data;
2611  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2612  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2613  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2614 
2615  data.m_ViewOrigins.push_back(window1);
2616  data.m_ViewOrigins.push_back(window2);
2617 
2618  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2619 
2620  inputHandle1->Allocate();
2621  inputHandle2->Allocate();
2622  outputHandle->Allocate();
2623 
2624  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2625  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2626 
2627  workload->PostAllocationConfigure();
2628  workload->Execute();
2629 
2630  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2631 
2632  return ret;
2633 }
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< ViewOrigin > m_ViewOrigins
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ CreateDescriptorForConcat()

OriginsDescriptor CreateDescriptorForConcat ( const std::vector< TensorInfo > &  inputTensorInfos,
unsigned int  concatDim 
)

Definition at line 26 of file ConcatTestImpl.cpp.

References armnn::CreateDescriptorForConcatenation().

Referenced by Concatenate().

29 {
30  std::vector<TensorShape> shapes;
31  shapes.reserve(inputTensorInfos.size());
32  for (const TensorInfo& it: inputTensorInfos)
33  {
34  shapes.push_back(it.GetShape());
35  }
36 
37  return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
38 }
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...

◆ ExpandTensorShapeTo3dForPermute()

TensorShape ExpandTensorShapeTo3dForPermute ( const TensorShape inputShape)

Definition at line 72 of file ConcatTestImpl.cpp.

References TensorShape::GetNumDimensions().

Referenced by PermuteInputsForConcat().

73 {
74  unsigned int numDims = inputShape.GetNumDimensions();
75  if (numDims >= 3)
76  {
77  // Nothing to do if the inputShape has at least 3 dimensions.
78  return inputShape;
79  }
80 
81  std::vector<unsigned int> newDims(size_t(3), 1u);
82  unsigned int expandedBy = 3 - numDims;
83  for (unsigned int i=0; i<numDims; ++i)
84  {
85  newDims[expandedBy+i] = inputShape[i];
86  }
87  return TensorShape(3u, &newDims[0]);
88 }
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43

◆ Generate3dPermuteVectorForConcat()

void Generate3dPermuteVectorForConcat ( unsigned int  numDimensions,
unsigned int &  concatDim,
std::pair< PermutationVector, PermutationVector > &  permutations 
)

Definition at line 90 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT, and ARMNN_ASSERT_MSG.

Referenced by PermuteInputsForConcat().

94 {
95  ARMNN_ASSERT_MSG(numDimensions <= 3,
96  "Only dimensions 1,2 and 3 are supported by this helper");
97  unsigned int expandedBy = 3 - numDimensions;
98  unsigned int expandedConcatAxis = concatDim + expandedBy;
99 
100  if (expandedConcatAxis == 2)
101  {
102  concatDim = 0;
103  PermutationVector forwardPermutation({1, 2, 0});
104  PermutationVector reversePermutation({2, 0, 1});
105  permutations = std::make_pair(forwardPermutation, reversePermutation);
106  }
107  else if (expandedConcatAxis == 1)
108  {
109  concatDim = 0;
110  PermutationVector forwardPermutation({2, 0, 1});
111  PermutationVector reversePermutation({1, 2, 0});
112  permutations = std::make_pair(forwardPermutation, reversePermutation);
113  }
114  else
115  {
116  ARMNN_ASSERT(expandedConcatAxis == 0);
117  concatDim = 0;
118  }
119 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ NeedPermuteForConcat()

bool NeedPermuteForConcat ( const std::vector< TensorInfo > &  inputTensorInfos,
unsigned int  concatDim 
)

Definition at line 46 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG.

Referenced by Concatenate().

49 {
50  // See note above. Additionally we expect the input shapes to have the
51  // same number of dimensions.
52  unsigned int nDimensions = 0;
53 
54  // Determine the number of dimensions as well as sanity check them
55  // against test implementation issues.
56  for (auto && tensorInfo : inputTensorInfos)
57  {
58  if (!nDimensions)
59  {
60  nDimensions = tensorInfo.GetShape().GetNumDimensions();
61  }
62  else
63  {
64  ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
65  "Input shapes must have the same number of dimensions");
66  }
67  }
68 
69  return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
70 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15

◆ PermuteInputsForConcat()

void PermuteInputsForConcat ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
std::vector< TensorInfo > &  inputTensorInfos,
std::vector< T *> &  inputData,
std::vector< std::vector< T >> &  inputDataStorage,
PermutationVector permuteVector,
unsigned int &  concatDim,
TensorInfo outputTensorInfo 
)

Definition at line 171 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, ExpandTensorShapeTo3dForPermute(), Generate3dPermuteVectorForConcat(), TensorInfo::GetShape(), armnn::IgnoreUnused(), PermutationVector::IsEqual(), armnnUtils::Permuted(), and TensorInfo::SetShape().

180 {
181  IgnoreUnused(memoryManager);
182  ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
183  "Expecting more than one tensor to be concatenated here");
184 
185  unsigned int numDims = 0;
186  unsigned int nthInput = 0;
187  const PermutationVector identity({0, 1, 2});
188 
189  std::pair<PermutationVector, PermutationVector> permutations =
190  std::make_pair(identity, identity);
191 
192  inputDataStorage.resize(inputData.size());
193 
194  for (auto && tensorInfo : inputTensorInfos)
195  {
196  if (numDims == 0)
197  {
198  numDims = tensorInfo.GetShape().GetNumDimensions();
199  Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
200 
201  // Store the reverse permutation.
202  permuteVector = permutations.second;
203  ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
204  "Test logic error, we don't need permutation, so we shouldn't arrive here");
205  }
206  else
207  {
208  ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
209  "All inputs must have the same number of dimensions");
210  }
211 
212  TensorInfo newTensorInfo = tensorInfo;
213  newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
214 
215  PermuteTensorData<T>(workloadFactory,
216  memoryManager,
217  permutations.first,
218  newTensorInfo,
219  inputData[nthInput],
220  inputDataStorage[nthInput]);
221 
222  inputData[nthInput] = inputDataStorage[nthInput].data();
223  inputTensorInfos[nthInput] = newTensorInfo;
224 
225  ++nthInput;
226  }
227 
228  outputTensorInfo.SetShape(
230  ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
231  permutations.first));
232 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void IgnoreUnused(Ts &&...)
void Generate3dPermuteVectorForConcat(unsigned int numDimensions, unsigned int &concatDim, std::pair< PermutationVector, PermutationVector > &permutations)
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape &inputShape)
bool IsEqual(const PermutationVector &other) const
Definition: Types.hpp:207
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98

◆ PermuteOutputForConcat()

void PermuteOutputForConcat ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const TensorInfo tensorInfo,
const PermutationVector permuteVector,
std::unique_ptr< ITensorHandle > &&  inputDataHandle,
T *  data 
)

Definition at line 239 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, CopyDataFromITensorHandle(), and TensorInfo::GetNumElements().

246 {
247  ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
248  if (data == nullptr)
249  {
250  // Nullptr is an error in the test. By returning without doing the permutation
251  // I expect the caller to fail the test. It still makes sense to report this as
252  // an assert for Debug builds.
253  return;
254  }
255 
256  TensorInfo resultTensorInfo = tensorInfo;
257  std::vector<T> inputData(tensorInfo.GetNumElements());
258  std::vector<T> outputData;
259 
260  CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
261 
262  PermuteTensorData<T>(workloadFactory,
263  memoryManager,
264  permuteVector,
265  resultTensorInfo,
266  &inputData[0],
267  outputData);
268 
269  ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
270 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ PermuteTensorData()

void PermuteTensorData ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const PermutationVector mappings,
TensorInfo inputTensorInfo,
const T *  inputData,
std::vector< T > &  outputData 
)

Definition at line 121 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreatePermute(), IWorkloadFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), armnn::IgnoreUnused(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and armnnUtils::Permuted().

128 {
129  IgnoreUnused(memoryManager);
130  ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
131  if (inputData == nullptr)
132  {
133  // Nullptr is an error in the test. By returning without doing the concatenation
134  // I expect the caller to fail the test. It still makes sense to report this as
135  // an assert for Debug builds.
136  return;
137  }
138 
139  TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
140 
141  std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
142  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
143 
144  PermuteQueueDescriptor queueDescriptor;
145  queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
146  WorkloadInfo workloadInfo;
147  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
148  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
149 
150  std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
151 
152  inputHandle->Allocate();
153  outputHandle->Allocate();
154 
155  CopyDataToITensorHandle(inputHandle.get(), inputData);
156 
157  workload->PostAllocationConfigure();
158  workload->Execute();
159 
160  outputData.resize(outputTensorInfo.GetNumElements());
161  CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
162  inputTensorInfo = outputTensorInfo;
163 }
void IgnoreUnused(Ts &&...)
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
virtual std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98
unsigned int GetNumElements() const
Definition: Tensor.hpp:93
A PermuteDescriptor for the PermuteLayer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)