ArmNN
 20.08
ConcatTestImpl.cpp File Reference

Go to the source code of this file.

Functions

OriginsDescriptor CreateDescriptorForConcat (const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
 
bool NeedPermuteForConcat (const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
 
TensorShape ExpandTensorShapeTo3dForPermute (const TensorShape &inputShape)
 
void Generate3dPermuteVectorForConcat (unsigned int numDimensions, unsigned int &concatDim, std::pair< PermutationVector, PermutationVector > &permutations)
 
template<typename T >
void PermuteTensorData (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const PermutationVector &mappings, TensorInfo &inputTensorInfo, const T *inputData, std::vector< T > &outputData)
 
template<typename T >
void PermuteInputsForConcat (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, std::vector< TensorInfo > &inputTensorInfos, std::vector< T *> &inputData, std::vector< std::vector< T >> &inputDataStorage, PermutationVector &permuteVector, unsigned int &concatDim, TensorInfo &outputTensorInfo)
 
template<typename T >
void PermuteOutputForConcat (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &tensorInfo, const PermutationVector &permuteVector, std::unique_ptr< ITensorHandle > &&inputDataHandle, T *data)
 
template<typename T >
void Concatenate (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, std::initializer_list< const TensorInfo > inputTensorInfosOrig, std::initializer_list< T *> inputsOrig, const TensorInfo &outputTensorInfoOrig, T *output, unsigned int concatDim, bool useSubtensor)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 1 > Concat1dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &outputTensorInfo, unsigned int dimension, const float qScale, const int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim0DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 2 > Concat2dDim1DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &outputTensorInfo, unsigned int dimension, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim2TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim0DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim1DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 3 > Concat3dDim2DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dTestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const TensorInfo &outputTensorInfo, unsigned int dimension, bool useSubtensor, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim2TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDim3TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, bool useSubtensor)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim0TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim1TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim2TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Concat4dDiffShapeDim3TestImpl (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, bool useSubtensor)
 
template<DataType ArmnnType, typename T >
LayerTestResult< T, 3 > ConcatDifferentInputOutputQParamTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
template LayerTestResult< ResolveType< DataType::QAsymmU8 >, 3 > ConcatDifferentInputOutputQParamTest< DataType::QAsymmU8 > (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
template LayerTestResult< ResolveType< DataType::QSymmS16 >, 3 > ConcatDifferentInputOutputQParamTest< DataType::QSymmS16 > (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > ConcatTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 1 > Concat1dTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim0DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 2 > Concat2dDim1DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 3 > Concat3dDim0DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim1DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 3 > Concat3dDim2DiffInputDimsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim2Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDim3Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim0Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim1Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim2Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > Concat4dDiffShapeDim3Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< Half, 3 > ConcatFloat16Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< BFloat16, 3 > ConcatBFloat16Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8DifferentQParamsTest (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > ConcatUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint16_t, 3 > ConcatUint16Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 1 > Concat1dUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim0DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 2 > Concat2dDim1DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 3 > Concat3dDim0DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim1DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 3 > Concat3dDim2DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim2Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDim3Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim0Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim1Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim2Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > Concat4dDiffShapeDim3Uint8Test (IWorkloadFactory &workloadFactory, const IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool useSubtensor)
 

Function Documentation

◆ Concat1dTest()

LayerTestResult<float, 1> Concat1dTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2202 of file ConcatTestImpl.cpp.

2205 {
2206  return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2207 }

◆ Concat1dTestImpl()

LayerTestResult<T, 1> Concat1dTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 418 of file ConcatTestImpl.cpp.

423 {
424  TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
425 
426  auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
427  auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
428  auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));
429 
430  TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
431 
432  LayerTestResult<T, 1> result(outputTensorInfo);
433 
434  std::vector<T> output;
435  output.resize(outputTensorInfo.GetNumElements());
436  Concatenate<T>(workloadFactory, memoryManager,
437  { inputTensorInfo, inputTensorInfo, inputTensorInfo },
438  { input0.data(), input1.data(), input2.data() },
439  outputTensorInfo,
440  output.data(),
441  0,
442  true);
443 
444  result.output = MakeTensor<T, 1>(outputTensorInfo, output);
445  result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
446  {
447  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
448  },
449  qScale, qOffset));
450 
451  return result;
452 }

◆ Concat1dUint8Test()

LayerTestResult<uint8_t, 1> Concat1dUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2775 of file ConcatTestImpl.cpp.

2778 {
2779  return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2780 }

◆ Concat2dDim0DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2223 of file ConcatTestImpl.cpp.

2226 {
2227  return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2228 }

◆ Concat2dDim0DiffInputDimsTestImpl()

LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 574 of file ConcatTestImpl.cpp.

579 {
580  TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
581  auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
582  {
583  // Batch 0
584  1.0f, 2.0f, 3.0f,
585 
586  // Batch 1
587  10.0f, 11.0f, 12.0f,
588  },
589  qScale, qOffset));
590 
591  TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
592  auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
593  {
594  // Batch 0
595  4.0f, 5.0f, 6.0f,
596 
597  // Batch 1
598  13.0f, 14.0f, 15.0f,
599 
 600  // Batch 2
601  7.0f, 8.0f, 9.0f,
602  },
603  qScale, qOffset));
604 
605  TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
606  auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
607  {
 608  // Batch 0
609  16.0f, 17.0f, 18.0f,
610  },
611  qScale, qOffset));
612 
613  TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
614  LayerTestResult<T, 2> result(outputTensorInfo);
615 
616  std::vector<T> output;
617  output.resize(outputTensorInfo.GetNumElements());
618  Concatenate<T>(workloadFactory, memoryManager,
619  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
620  { input0.data(), input1.data(), input2.data() },
621  outputTensorInfo,
622  output.data(),
623  0,
624  true);
625 
626  result.output = MakeTensor<T, 2>(outputTensorInfo, output);
627  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
628  {
629  // Batch 0
630  1.0f, 2.0f, 3.0f,
631 
632  // Batch 1
633  10.0f, 11.0f, 12.0f,
634 
635  // Batch 2
636  4.0f, 5.0f, 6.0f,
637 
638  // Batch 3
639  13.0f, 14.0f, 15.0f,
640 
641  // Batch 4
642  7.0f, 8.0f, 9.0f,
643 
644  // Batch 5
645  16.0f, 17.0f, 18.0f,
646  },
647  qScale, qOffset));
648 
649  return result;
650 }

◆ Concat2dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2796 of file ConcatTestImpl.cpp.

2799 {
2800  return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
2801  workloadFactory, memoryManager, 0.5f, -1);
2802 }

◆ Concat2dDim0Test()

LayerTestResult<float, 2> Concat2dDim0Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2209 of file ConcatTestImpl.cpp.

2212 {
2213  return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2214 }

◆ Concat2dDim0TestImpl()

LayerTestResult<T, 2> Concat2dDim0TestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 512 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

517 {
518  TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
519 
520  LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
521  workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
522 
523  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
524  {
525  // Batch 0
526  1.0f, 2.0f, 3.0f,
527 
528  // Batch 1
529  10.0f, 11.0f, 12.0f,
530 
531  // Batch 2
532  4.0f, 5.0f, 6.0f,
533 
534  // Batch 3
535  13.0f, 14.0f, 15.0f,
536 
537  // Batch 4
538  7.0f, 8.0f, 9.0f,
539 
540  // Batch 5
541  16.0f, 17.0f, 18.0f,
542  },
543  qScale, qOffset));
544 
545  return result;
546 }
References: LayerTestResult&lt; T, n &gt;::outputExpected (of type boost::multi_array&lt; T, n &gt;).

◆ Concat2dDim0Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2782 of file ConcatTestImpl.cpp.

2785 {
2786  return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2787 }

◆ Concat2dDim1DiffInputDimsTest()

LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2230 of file ConcatTestImpl.cpp.

2233 {
2234  return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2235 }

◆ Concat2dDim1DiffInputDimsTestImpl()

LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 653 of file ConcatTestImpl.cpp.

658 {
659  TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
660  auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
661  {
662  // Batch 0
663  1.0f, 2.0f, 3.0f,
664 
665  // Batch 1
666  10.0f, 11.0f, 12.0f,
667  },
668  qScale, qOffset));
669 
670  TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
671  auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
672  {
673  // Batch 0
674  4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
675 
676  // Batch 1
677  13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
678  },
679  qScale, qOffset));
680 
681  TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
682  auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
683  {
684  // Batch 0
685  9.0f,
686 
687  // Batch 1
688  18.0f
689  },
690  qScale, qOffset));
691 
692  TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
693  LayerTestResult<T, 2> result(outputTensorInfo);
694 
695  std::vector<T> output;
696  output.resize(outputTensorInfo.GetNumElements());
697  Concatenate<T>(workloadFactory, memoryManager,
698  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
699  { input0.data(), input1.data(), input2.data() },
700  outputTensorInfo,
701  output.data(),
702  1,
703  true);
704 
705  result.output = MakeTensor<T, 2>(outputTensorInfo, output);
706  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
707  {
708  // Batch 0
709  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
710 
711  // Batch 1
712  10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
713  },
714  qScale, qOffset));
715 
716  return result;
717 }

◆ Concat2dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2804 of file ConcatTestImpl.cpp.

2807 {
2808  return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2809  workloadFactory, memoryManager, 0.5f, -1);
2810 }

◆ Concat2dDim1Test()

LayerTestResult<float, 2> Concat2dDim1Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2216 of file ConcatTestImpl.cpp.

2219 {
2220  return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2221 }

◆ Concat2dDim1TestImpl()

LayerTestResult<T, 2> Concat2dDim1TestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 549 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

554 {
555  TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
556 
557  LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
558  workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
559 
560  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
561  {
562  // Batch 0
563  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
564 
565  // Batch 1
566  10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
567  },
568  qScale, qOffset));
569 
570  return result;
571 }
References: LayerTestResult&lt; T, n &gt;::outputExpected (of type boost::multi_array&lt; T, n &gt;).

◆ Concat2dDim1Uint8Test()

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2789 of file ConcatTestImpl.cpp.

2792 {
2793  return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2794 }

◆ Concat2dTestImpl()

LayerTestResult<T, 2> Concat2dTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const TensorInfo outputTensorInfo,
unsigned int  dimension,
const float  qScale,
const int32_t  qOffset 
)

Definition at line 455 of file ConcatTestImpl.cpp.

References TensorInfo::GetNumElements(), and LayerTestResult< T, n >::output.

462 {
463  TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
464 
465  auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
466  {
467  // Batch 0
468  1.0f, 2.0f, 3.0f,
469 
470  // Batch 1
471  10.0f, 11.0f, 12.0f,
472  },
473  qScale, qOffset));
474 
475  auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
476  {
477  // Batch 0
478  4.0f, 5.0f, 6.0f,
479 
480  // Batch 1
481  13.0f, 14.0f, 15.0f,
482  },
483  qScale, qOffset));
484 
485  auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
486  {
487  // Batch 0
488  7.0f, 8.0f, 9.0f,
489 
490  // Batch 1
491  16.0f, 17.0f, 18.0f,
492  },
493  qScale, qOffset));
494 
495  LayerTestResult<T, 2> result(outputTensorInfo);
496 
497  std::vector<T> output;
498  output.resize(outputTensorInfo.GetNumElements());
499  Concatenate<T>(workloadFactory, memoryManager,
500  { inputTensorInfo, inputTensorInfo, inputTensorInfo },
501  { input0.data(), input1.data(), input2.data() },
502  outputTensorInfo,
503  output.data(),
504  dimension,
505  true);
506 
507  result.output = MakeTensor<T, 2>(outputTensorInfo, output);
508  return result;
509 }
References: unsigned int TensorInfo::GetNumElements() const — defined at line 192 of Tensor.hpp.

◆ Concat3dDim0DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2259 of file ConcatTestImpl.cpp.

2262 {
2263  return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
2264  workloadFactory, memoryManager, 0.0f, 0);
2265 }

◆ Concat3dDim0DiffInputDimsTestImpl()

LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 998 of file ConcatTestImpl.cpp.

1003 {
1004  TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
1005  auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
1006  {
1007  // Batch 0, Channel 0
1008  1.0f, 2.0f,
1009 
1010  // Batch 0, Channel 1
1011  3.0f, 4.0f,
1012 
1013  // Batch 0, Channel 2
1014  5.0f, 6.0f,
1015 
1016  // Batch 1, Channel 0
1017  19.0f, 20.0f,
1018 
1019  // Batch 1, Channel 1
1020  21.0f, 22.0f,
1021 
1022  // Batch 1, Channel 2
1023  23.0f, 24.0f
1024  },
1025  qScale, qOffset));
1026 
1027  TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
1028  auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
1029  {
1030  // Batch 0, Channel 0
1031  7.0f, 8.0f,
1032 
1033  // Batch 0, Channel 1
1034  9.0f, 10.0f,
1035 
1036  // Batch 0, Channel 2
1037  11.0f, 12.0f,
1038  },
1039  qScale, qOffset));
1040 
1041  TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
1042  auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
1043  {
1044  // Batch 0, Channel 0
1045  25.0f, 26.0f,
1046 
1047  // Batch 0, Channel 1
1048  27.0f, 28.0f,
1049 
1050  // Batch 0, Channel 2
1051  29.0f, 30.0f,
1052 
1053  // Batch 1, Channel 0
1054  13.0f, 14.0f,
1055 
1056  // Batch 1, Channel 1
1057  15.0f, 16.0f,
1058 
1059  // Batch 1, Channel 2
1060  17.0f, 18.0f,
1061 
1062  // Batch 2, Channel 0
1063  31.0f, 32.0f,
1064 
1065  // Batch 2, Channel 1
1066  33.0f, 34.0f,
1067 
1068  // Batch 2, Channel 2
1069  35.0f, 36.0f
1070  },
1071  qScale, qOffset));
1072 
1073  TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
1074  LayerTestResult<T, 3> result(outputTensorInfo);
1075 
1076  std::vector<T> output;
1077  output.resize(outputTensorInfo.GetNumElements());
1078  Concatenate<T>(workloadFactory, memoryManager,
1079  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
1080  { input0.data(), input1.data(), input2.data() },
1081  outputTensorInfo,
1082  output.data(),
1083  0,
1084  true);
1085 
1086  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
1087  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
1088  {
1089  // Batch 0, Channel 0
1090  1.0f, 2.0f,
1091 
1092  // Batch 0, Channel 1
1093  3.0f, 4.0f,
1094 
1095  // Batch 0, Channel 2
1096  5.0f, 6.0f,
1097 
1098  // Batch 1, Channel 0
1099  19.0f, 20.0f,
1100 
1101  // Batch 1, Channel 1
1102  21.0f, 22.0f,
1103 
1104  // Batch 1, Channel 2
1105  23.0f, 24.0f,
1106 
1107  // Batch 2, Channel 0
1108  7.0f, 8.0f,
1109 
1110  // Batch 2, Channel 1
1111  9.0f, 10.0f,
1112 
1113  // Batch 2, Channel 2
1114  11.0f, 12.0f,
1115 
1116  // Batch 3, Channel 0
1117  25.0f, 26.0f,
1118 
1119  // Batch 3, Channel 1
1120  27.0f, 28.0f,
1121 
1122  // Batch 3, Channel 2
1123  29.0f, 30.0f,
1124 
1125  // Batch 4, Channel 0
1126  13.0f, 14.0f,
1127 
1128  // Batch 4, Channel 1
1129  15.0f, 16.0f,
1130 
1131  // Batch 4, Channel 2
1132  17.0f, 18.0f,
1133 
1134  // Batch 5, Channel 0
1135  31.0f, 32.0f,
1136 
1137  // Batch 5, Channel 1
1138  33.0f, 34.0f,
1139 
1140  // Batch 5, Channel 2
1141  35.0f, 36.0f
1142  },
1143  qScale, qOffset));
1144 
1145  return result;
1146 }

◆ Concat3dDim0DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2835 of file ConcatTestImpl.cpp.

2838 {
2839  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2840 }

◆ Concat3dDim0Test()

LayerTestResult<float, 3> Concat3dDim0Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2237 of file ConcatTestImpl.cpp.

2240 {
2241  return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2242 }

◆ Concat3dDim0TestImpl()

LayerTestResult<T, 3> Concat3dDim0TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 814 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

819 {
820  TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
821 
822  LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
823  workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
824 
825  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
826  {
827  // Batch 0, Channel 0
828  1.0f, 2.0f,
829 
830  // Batch 0, Channel 1
831  3.0f, 4.0f,
832 
833  // Batch 0, Channel 2
834  5.0f, 6.0f,
835 
836  // Batch 1, Channel 0
837  19.0f, 20.0f,
838 
839  // Batch 1, Channel 1
840  21.0f, 22.0f,
841 
842  // Batch 1, Channel 2
843  23.0f, 24.0f,
844 
845  // Batch 2, Channel 0
846  7.0f, 8.0f,
847 
848  // Batch 2, Channel 1
849  9.0f, 10.0f,
850 
851  // Batch 2, Channel 2
852  11.0f, 12.0f,
853 
854  // Batch 3, Channel 0
855  25.0f, 26.0f,
856 
857  // Batch 3, Channel 1
858  27.0f, 28.0f,
859 
860  // Batch 3, Channel 2
861  29.0f, 30.0f,
862 
863  // Batch 4, Channel 0
864  13.0f, 14.0f,
865 
866  // Batch 4, Channel 1
867  15.0f, 16.0f,
868 
869  // Batch 4, Channel 2
870  17.0f, 18.0f,
871 
872  // Batch 5, Channel 0
873  31.0f, 32.0f,
874 
875  // Batch 5, Channel 1
876  33.0f, 34.0f,
877 
878  // Batch 5, Channel 2
879  35.0f, 36.0f
880  },
881  qScale, qOffset));
882 
883  return result;
884 }
boost::multi_array< T, n > outputExpected

◆ Concat3dDim0Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2812 of file ConcatTestImpl.cpp.

2815 {
2816  return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2817 }

◆ Concat3dDim1DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2267 of file ConcatTestImpl.cpp.

2270 {
2271  return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2272 }

◆ Concat3dDim1DiffInputDimsTestImpl()

LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1149 of file ConcatTestImpl.cpp.

1154 {
1155  TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
1156  auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
1157  {
1158  // Batch 0, Channel 0
1159  1.0f, 2.0f,
1160 
1161  // Batch 0, Channel 1
1162  3.0f, 4.0f,
1163 
1164  // Batch 0, Channel 2
1165  5.0f, 6.0f,
1166 
1167  // Batch 1, Channel 0
1168  19.0f, 20.0f,
1169 
1170  // Batch 1, Channel 1
1171  21.0f, 22.0f,
1172 
1173  // Batch 1, Channel 2
1174  23.0f, 24.0f
1175  },
1176  qScale, qOffset));
1177 
1178  TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
1179  auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
1180  {
1181  // Batch 0, Channel 0
1182  7.0f, 8.0f,
1183 
1184  // Batch 0, Channel 1
1185  9.0f, 10.0f,
1186 
1187  // Batch 0, Channel 2
1188  11.0f, 12.0f,
1189 
1190  // Batch 0, Channel 3
1191  25.0f, 26.0f,
1192 
1193  // Batch 1, Channel 0
1194  27.0f, 28.0f,
1195 
1196  // Batch 1, Channel 1
1197  29.0f, 30.0f,
1198 
1199  // Batch 1, Channel 2
1200  13.0f, 14.0f,
1201 
1202  // Batch 1, Channel 3
1203  15.0f, 16.0f,
1204  },
1205  qScale, qOffset));
1206 
1207  TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
1208  auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
1209  {
1210  // Batch 0, Channel 0
1211  17.0f, 18.0f,
1212 
1213  // Batch 1, Channel 0
1214  31.0f, 32.0f,
1215  },
1216  qScale, qOffset));
1217 
1218  TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
1219  LayerTestResult<T, 3> result(outputTensorInfo);
1220 
1221  std::vector<T> output;
1222  output.resize(outputTensorInfo.GetNumElements());
1223  Concatenate<T>(workloadFactory, memoryManager,
1224  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
1225  { input0.data(), input1.data(), input2.data() },
1226  outputTensorInfo,
1227  output.data(),
1228  1,
1229  true);
1230 
1231  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
1232  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
1233  {
1234  // Batch 0, Channel 0
1235  1.0f, 2.0f,
1236 
1237  // Batch 0, Channel 1
1238  3.0f, 4.0f,
1239 
1240  // Batch 0, Channel 2
1241  5.0f, 6.0f,
1242 
1243  // Batch 0, Channel 3
1244  7.0f, 8.0f,
1245 
1246  // Batch 0, Channel 4
1247  9.0f, 10.0f,
1248 
1249  // Batch 0, Channel 5
1250  11.0f, 12.0f,
1251 
1252  // Batch 0, Channel 6
1253  25.0f, 26.0f,
1254 
1255  // Batch 0, Channel 7
1256  17.0f, 18.0f,
1257 
1258  // Batch 1, Channel 0
1259  19.0f, 20.0f,
1260 
1261  // Batch 1, Channel 1
1262  21.0f, 22.0f,
1263 
1264  // Batch 1, Channel 2
1265  23.0f, 24.0f,
1266 
1267  // Batch 1, Channel 3
1268  27.0f, 28.0f,
1269 
1270  // Batch 1, Channel 4
1271  29.0f, 30.0f,
1272 
1273  // Batch 1, Channel 5
1274  13.0f, 14.0f,
1275 
1276  // Batch 1, Channel 6
1277  15.0f, 16.0f,
1278 
1279  // Batch 1, Channel 7
1280  31.0f, 32.0f,
1281  },
1282  qScale, qOffset));
1283 
1284  return result;
1285 }

◆ Concat3dDim1DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2842 of file ConcatTestImpl.cpp.

2845 {
2846  return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
2847  workloadFactory, memoryManager, 0.5f, -1);
2848 }

◆ Concat3dDim1Test()

LayerTestResult<float, 3> Concat3dDim1Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2244 of file ConcatTestImpl.cpp.

2247 {
2248  return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2249 }

◆ Concat3dDim1TestImpl()

LayerTestResult<T, 3> Concat3dDim1TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 887 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

892 {
893  TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
894 
895  LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
896  workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
897 
898  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
899  {
900  // Batch 0, Channel 0
901  1.0f, 2.0f,
902 
903  // Batch 0, Channel 1
904  3.0f, 4.0f,
905 
906  // Batch 0, Channel 2
907  5.0f, 6.0f,
908 
909  // Batch 0, Channel 3
910  7.0f, 8.0f,
911 
912  // Batch 0, Channel 4
913  9.0f, 10.0f,
914 
915  // Batch 0, Channel 5
916  11.0f, 12.0f,
917 
918  // Batch 0, Channel 6
919  13.0f, 14.0f,
920 
921  // Batch 0, Channel 7
922  15.0f, 16.0f,
923 
924  // Batch 0, Channel 8
925  17.0f, 18.0f,
926 
927  // Batch 1, Channel 0
928  19.0f, 20.0f,
929 
930  // Batch 1, Channel 1
931  21.0f, 22.0f,
932 
933  // Batch 1, Channel 2
934  23.0f, 24.0f,
935 
936  // Batch 1, Channel 3
937  25.0f, 26.0f,
938 
939  // Batch 1, Channel 4
940  27.0f, 28.0f,
941 
942  // Batch 1, Channel 5
943  29.0f, 30.0f,
944 
945  // Batch 1, Channel 6
946  31.0f, 32.0f,
947 
948  // Batch 1, Channel 7
949  33.0f, 34.0f,
950 
951  // Batch 1, Channel 8
952  35.0f, 36.0f
953  },
954  qScale, qOffset));
955 
956  return result;
957 }
boost::multi_array< T, n > outputExpected

◆ Concat3dDim1Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2819 of file ConcatTestImpl.cpp.

2822 {
2823  return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2824 }

◆ Concat3dDim2DiffInputDimsTest()

LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor 
)

Definition at line 2274 of file ConcatTestImpl.cpp.

2278 {
2279  return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
2280  workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2281 }

◆ Concat3dDim2DiffInputDimsTestImpl()

LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 1288 of file ConcatTestImpl.cpp.

1294 {
1295  TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
1296  auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
1297  {
1298  // Batch 0, Channel 0
1299  1.0f, 2.0f,
1300 
1301  // Batch 0, Channel 1
1302  3.0f, 4.0f,
1303 
1304  // Batch 0, Channel 2
1305  5.0f, 6.0f,
1306 
1307  // Batch 1, Channel 0
1308  19.0f, 20.0f,
1309 
1310  // Batch 1, Channel 1
1311  21.0f, 22.0f,
1312 
1313  // Batch 1, Channel 2
1314  23.0f, 24.0f
1315  },
1316  qScale, qOffset));
1317 
1318  TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
1319  auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
1320  {
1321  // Batch 0, Channel 0
1322  7.0f,
1323 
1324  // Batch 0, Channel 1
1325  9.0f,
1326 
1327  // Batch 0, Channel 2
1328  11.0f,
1329 
1330  // Batch 1, Channel 0
1331  25.0f,
1332 
1333  // Batch 1, Channel 1
1334  27.0f,
1335 
1336  // Batch 1, Channel 2
1337  29.0f
1338  },
1339  qScale, qOffset));
1340 
1341  TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
1342  auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
1343  {
1344  // Batch 0, Channel 0
1345  13.0f, 14.0f, 50.0f,
1346 
1347  // Batch 0, Channel 1
1348  15.0f, 16.0f, 51.0f,
1349 
1350  // Batch 0, Channel 2
1351  17.0f, 18.0f, 52.0f,
1352 
1353  // Batch 1, Channel 0
1354  31.0f, 32.0f, 53.0f,
1355 
1356  // Batch 1, Channel 1
1357  33.0f, 34.0f, 54.0f,
1358 
1359  // Batch 1, Channel 2
1360  35.0f, 36.0f, 55.0f,
1361  },
1362  qScale, qOffset));
1363 
1364  TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
1365  LayerTestResult<T, 3> result(outputTensorInfo);
1366 
1367  std::vector<T> output;
1368  output.resize(outputTensorInfo.GetNumElements());
1369  Concatenate<T>(workloadFactory, memoryManager,
1370  { input0TensorInfo, input1TensorInfo, input2TensorInfo },
1371  { input0.data(), input1.data(), input2.data() },
1372  outputTensorInfo,
1373  output.data(),
1374  2,
1375  useSubtensor);
1376 
1377  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
1378  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
1379  {
1380  // Batch 0, Channel 0
1381  1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
1382 
1383  // Batch 0, Channel 1
1384  3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
1385 
1386  // Batch 0, Channel 2
1387  5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
1388 
1389  // Batch 1, Channel 0
1390  19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
1391 
1392  // Batch 1, Channel 1
1393  21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
1394 
1395  // Batch 1, Channel 2
1396  23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
1397  },
1398  qScale, qOffset));
1399 
1400  return result;
1401 }

◆ Concat3dDim2DiffInputDimsUint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor 
)

Definition at line 2850 of file ConcatTestImpl.cpp.

2854 {
2855  return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
2856  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2857 }

◆ Concat3dDim2Test()

LayerTestResult<float, 3> Concat3dDim2Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor 
)

Definition at line 2251 of file ConcatTestImpl.cpp.

2255 {
2256  return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2257 }

◆ Concat3dDim2TestImpl()

LayerTestResult<T, 3> Concat3dDim2TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 960 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

966 {
967  TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
968 
969  LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
970  workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
971 
972  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
973  {
974  // Batch 0, Channel 0
975  1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
976 
977  // Batch 0, Channel 1
978  3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
979 
980  // Batch 0, Channel 2
981  5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
982 
983  // Batch 1, Channel 0
984  19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
985 
986  // Batch 1, Channel 1
987  21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
988 
989  // Batch 1, Channel 2
990  23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
991  },
992  qScale, qOffset));
993 
994  return result;
995 }
boost::multi_array< T, n > outputExpected

◆ Concat3dDim2Uint8Test()

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor 
)

Definition at line 2826 of file ConcatTestImpl.cpp.

2830 {
2831  return Concat3dDim2TestImpl<DataType::QAsymmU8>(
2832  workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
2833 }

◆ Concat3dTestImpl()

LayerTestResult<T, 3> Concat3dTestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const TensorInfo &outputTensorInfo,
unsigned int  dimension,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 720 of file ConcatTestImpl.cpp.

References TensorInfo::GetNumElements(), and LayerTestResult< T, n >::output.

728 {
729  TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
730 
731  auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
732  {
733  // Batch 0, Channel 0
734  1.0f, 2.0f,
735 
736  // Batch 0, Channel 1
737  3.0f, 4.0f,
738 
739  // Batch 0, Channel 2
740  5.0f, 6.0f,
741 
742  // Batch 1, Channel 0
743  19.0f, 20.0f,
744 
745  // Batch 1, Channel 1
746  21.0f, 22.0f,
747 
748  // Batch 1, Channel 2
749  23.0f, 24.0f
750  },
751  qScale, qOffset));
752 
753  auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
754  {
755  // Batch 0, Channel 0
756  7.0f, 8.0f,
757 
758  // Batch 0, Channel 1
759  9.0f, 10.0f,
760 
761  // Batch 0, Channel 2
762  11.0f, 12.0f,
763 
764  // Batch 1, Channel 0
765  25.0f, 26.0f,
766 
767  // Batch 1, Channel 1
768  27.0f, 28.0f,
769 
770  // Batch 1, Channel 2
771  29.0f, 30.0f
772  },
773  qScale, qOffset));
774 
775  auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
776  {
777  // Batch 0, Channel 0
778  13.0f, 14.0f,
779 
780  // Batch 0, Channel 1
781  15.0f, 16.0f,
782 
783  // Batch 0, Channel 2
784  17.0f, 18.0f,
785 
786  // Batch 1, Channel 0
787  31.0f, 32.0f,
788 
789  // Batch 1, Channel 1
790  33.0f, 34.0f,
791 
792  // Batch 1, Channel 2
793  35.0f, 36.0f
794  },
795  qScale, qOffset));
796 
797  LayerTestResult<T, 3> result(outputTensorInfo);
798 
799  std::vector<T> output;
800  output.resize(outputTensorInfo.GetNumElements());
801  Concatenate<T>(workloadFactory, memoryManager,
802  { inputTensorInfo, inputTensorInfo, inputTensorInfo },
803  { input0.data(), input1.data(), input2.data() },
804  outputTensorInfo,
805  output.data(),
806  dimension,
807  useSubtensor);
808 
809  result.output = MakeTensor<T, 3>(outputTensorInfo, output);
810  return result;
811 }
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ Concat4dDiffShapeDim0Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim0Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2312 of file ConcatTestImpl.cpp.

2315 {
2316  return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2317 }

◆ Concat4dDiffShapeDim0TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1628 of file ConcatTestImpl.cpp.

1633 {
1634  constexpr unsigned int dimension = 0u;
1635 
1636  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1637  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1638  {
1639  1.0f, 2.0f,
1640  3.0f, 4.0f,
1641  5.0f, 6.0f,
1642  7.0f, 8.0f,
1643  9.0f, 10.0f,
1644  11.0f, 12.0f
1645  },
1646  qScale, qOffset));
1647 
1648  TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1649 
1650  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1651  {
1652  11.0f, 12.0f,
1653  13.0f, 14.0f,
1654  15.0f, 16.0f,
1655  17.0f, 18.0f,
1656  19.0f, 20.0f,
1657  21.0f, 22.0f,
1658 
1659  21.0f, 22.0f,
1660  23.0f, 24.0f,
1661  25.0f, 26.0f,
1662  27.0f, 28.0f,
1663  29.0f, 30.0f,
1664  31.0f, 32.0f
1665  },
1666  qScale, qOffset));
1667 
1668  TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1669 
1670  LayerTestResult<T, 4> result(outputTensorInfo);
1671 
1672  std::vector<T> output;
1673  output.resize(outputTensorInfo.GetNumElements());
1674  Concatenate<T>(workloadFactory,
1675  memoryManager,
1676  {inputTensorInfo0, inputTensorInfo1},
1677  {input0.data(), input1.data()},
1678  outputTensorInfo,
1679  output.data(),
1680  dimension,
1681  true);
1682 
1683  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1684  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1685  {
1686  1.0f, 2.0f,
1687  3.0f, 4.0f,
1688  5.0f, 6.0f,
1689  7.0f, 8.0f,
1690  9.0f, 10.0f,
1691  11.0f, 12.0f,
1692 
1693  11.0f, 12.0f,
1694  13.0f, 14.0f,
1695  15.0f, 16.0f,
1696  17.0f, 18.0f,
1697  19.0f, 20.0f,
1698  21.0f, 22.0f,
1699 
1700  21.0f, 22.0f,
1701  23.0f, 24.0f,
1702  25.0f, 26.0f,
1703  27.0f, 28.0f,
1704  29.0f, 30.0f,
1705  31.0f, 32.0f
1706  },
1707  qScale, qOffset));
1708 
1709  return result;
1710 }

◆ Concat4dDiffShapeDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2888 of file ConcatTestImpl.cpp.

2891 {
2892  return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
2893  workloadFactory, memoryManager, 0.5f, -1);
2894 }

◆ Concat4dDiffShapeDim1Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim1Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2319 of file ConcatTestImpl.cpp.

2322 {
2323  return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
2324  workloadFactory, memoryManager, 0.0f, 0);
2325 }

◆ Concat4dDiffShapeDim1TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1713 of file ConcatTestImpl.cpp.

1718 {
1719  constexpr unsigned int dimension = 1u;
1720 
1721  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1722  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1723  {
1724  1.0f, 2.0f,
1725  3.0f, 4.0f,
1726  5.0f, 6.0f,
1727  7.0f, 8.0f,
1728  9.0f, 10.0f,
1729  11.0f, 12.0f
1730  },
1731  qScale, qOffset));
1732 
1733  TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
1734 
1735  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1736  {
1737  11.0f, 12.0f,
1738  13.0f, 14.0f,
1739  15.0f, 16.0f,
1740  17.0f, 18.0f,
1741  },
1742  qScale, qOffset));
1743 
1744  TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
1745 
1746  LayerTestResult<T, 4> result(outputTensorInfo);
1747 
1748  std::vector<T> output;
1749  output.resize(outputTensorInfo.GetNumElements());
1750  Concatenate<T>(workloadFactory,
1751  memoryManager,
1752  {inputTensorInfo0, inputTensorInfo1},
1753  {input0.data(), input1.data()},
1754  outputTensorInfo,
1755  output.data(),
1756  dimension,
1757  true);
1758 
1759  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1760  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1761  {
1762  1.0f, 2.0f,
1763  3.0f, 4.0f,
1764  5.0f, 6.0f,
1765  7.0f, 8.0f,
1766  9.0f, 10.0f,
1767  11.0f, 12.0f,
1768  11.0f, 12.0f,
1769  13.0f, 14.0f,
1770  15.0f, 16.0f,
1771  17.0f, 18.0f
1772  },
1773  qScale, qOffset));
1774 
1775  return result;
1776 }

◆ Concat4dDiffShapeDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2896 of file ConcatTestImpl.cpp.

2899 {
2900  return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
2901  workloadFactory, memoryManager, 0.5f, -1);
2902 }

◆ Concat4dDiffShapeDim2Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim2Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2327 of file ConcatTestImpl.cpp.

2330 {
2331  return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2332 }

◆ Concat4dDiffShapeDim2TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1779 of file ConcatTestImpl.cpp.

1784 {
1785  constexpr unsigned int dimension = 2u;
1786 
1787  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1788  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1789  {
1790  1.0f, 2.0f,
1791  3.0f, 4.0f,
1792  5.0f, 6.0f,
1793  7.0f, 8.0f,
1794  9.0f, 10.0f,
1795  11.0f, 12.0f
1796  },
1797  qScale, qOffset));
1798 
1799  TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
1800  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1801  {
1802  11.0f, 12.0f,
1803  13.0f, 14.0f,
1804  15.0f, 16.0f,
1805  17.0f, 18.0f,
1806  19.0f, 20.0f,
1807  21.0f, 22.0f,
1808  23.0f, 24.0f,
1809  25.0f, 26.0f,
1810  27.0f, 28.0f
1811  },
1812  qScale, qOffset));
1813 
1814  TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
1815  LayerTestResult<T, 4> result(outputTensorInfo);
1816 
1817  std::vector<T> output;
1818  output.resize(outputTensorInfo.GetNumElements());
1819  Concatenate<T>(workloadFactory,
1820  memoryManager,
1821  {inputTensorInfo0, inputTensorInfo1},
1822  {input0.data(), input1.data()},
1823  outputTensorInfo,
1824  output.data(),
1825  dimension,
1826  true);
1827 
1828  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1829  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1830  {
1831  1.0f, 2.0f,
1832  3.0f, 4.0f,
1833  11.0f, 12.0f,
1834  13.0f, 14.0f,
1835  15.0f, 16.0f,
1836 
1837  5.0f, 6.0f,
1838  7.0f, 8.0f,
1839  17.0f, 18.0f,
1840  19.0f, 20.0f,
1841  21.0f, 22.0f,
1842 
1843  9.0f, 10.0f,
1844  11.0f, 12.0f,
1845  23.0f, 24.0f,
1846  25.0f, 26.0f,
1847  27.0f, 28.0f
1848  },
1849  qScale, qOffset));
1850 
1851  return result;
1852 }

◆ Concat4dDiffShapeDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2904 of file ConcatTestImpl.cpp.

2907 {
2908  return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
2909  workloadFactory, memoryManager, 0.5f, -1);
2910 }

◆ Concat4dDiffShapeDim3Test()

LayerTestResult<float, 4> Concat4dDiffShapeDim3Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor 
)

Definition at line 2334 of file ConcatTestImpl.cpp.

2338 {
2339  return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
2340  workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2341 }

◆ Concat4dDiffShapeDim3TestImpl()

LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset,
bool  useSubtensor 
)

Definition at line 1855 of file ConcatTestImpl.cpp.

1861 {
1862  constexpr unsigned int dimension = 3u;
1863 
1864  TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1865  auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1866  {
1867  1.0f, 2.0f,
1868  3.0f, 4.0f,
1869  5.0f, 6.0f,
1870  7.0f, 8.0f,
1871  9.0f, 10.0f,
1872  11.0f, 12.0f
1873  },
1874  qScale, qOffset));
1875 
1876  TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
1877  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1878  {
1879  11.0f, 12.0f, 13.0f,
1880  14.0f, 15.0f, 16.0f,
1881 
1882  17.0f, 18.0f, 19.0f,
1883  20.0f, 21.0f, 22.0f,
1884 
1885  23.0f, 24.0f, 25.0f,
1886  26.0f, 27.0f, 28.0f
1887  },
1888  qScale, qOffset));
1889 
1890  TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
1891 
1892  LayerTestResult<T, 4> result(outputTensorInfo);
1893 
1894  std::vector<T> output;
1895  output.resize(outputTensorInfo.GetNumElements());
1896  Concatenate<T>(workloadFactory,
1897  memoryManager,
1898  {inputTensorInfo0, inputTensorInfo1},
1899  {input0.data(), input1.data()},
1900  outputTensorInfo,
1901  output.data(),
1902  dimension,
1903  useSubtensor);
1904 
1905  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1906  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1907  {
1908  1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
1909  3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
1910  5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
1911  7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
1912  9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
1913  11.0f, 12.0f, 26.0f, 27.0f, 28.0f
1914  },
1915  qScale, qOffset));
1916 
1917  return result;
1918 }

◆ Concat4dDiffShapeDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool  useSubtensor 
)

Definition at line 2912 of file ConcatTestImpl.cpp.

2916 {
2917  return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
2918  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2919 }

◆ Concat4dDim0Test()

LayerTestResult<float, 4> Concat4dDim0Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2283 of file ConcatTestImpl.cpp.

2286 {
2287  return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2288 }

◆ Concat4dDim0TestImpl()

LayerTestResult<T, 4> Concat4dDim0TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1467 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1472 {
1473  TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1474 
1475  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1476  workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
1477 
1478  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1479  {
1480  1.0f, 2.0f,
1481  3.0f, 4.0f,
1482  5.0f, 6.0f,
1483  7.0f, 8.0f,
1484  9.0f, 10.0f,
1485  11.0f, 12.0f,
1486 
1487  11.0f, 12.0f,
1488  13.0f, 14.0f,
1489  15.0f, 16.0f,
1490  17.0f, 18.0f,
1491  19.0f, 20.0f,
1492  21.0f, 22.0f,
1493 
1494  21.0f, 22.0f,
1495  23.0f, 24.0f,
1496  25.0f, 26.0f,
1497  27.0f, 28.0f,
1498  29.0f, 30.0f,
1499  31.0f, 32.0f
1500  },
1501  qScale, qOffset));
1502 
1503  return result;
1504 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim0Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2859 of file ConcatTestImpl.cpp.

2862 {
2863  return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2864 }

◆ Concat4dDim1Test()

LayerTestResult<float, 4> Concat4dDim1Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2290 of file ConcatTestImpl.cpp.

2293 {
2294  return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2295 }

◆ Concat4dDim1TestImpl()

LayerTestResult<T, 4> Concat4dDim1TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1507 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1512 {
1513  TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
1514 
1515  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1516  workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
1517 
1518  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1519  {
1520  1.0f, 2.0f,
1521  3.0f, 4.0f,
1522  5.0f, 6.0f,
1523  7.0f, 8.0f,
1524  9.0f, 10.0f,
1525  11.0f, 12.0f,
1526 
1527  11.0f, 12.0f,
1528  13.0f, 14.0f,
1529  15.0f, 16.0f,
1530  17.0f, 18.0f,
1531  19.0f, 20.0f,
1532  21.0f, 22.0f,
1533 
1534  21.0f, 22.0f,
1535  23.0f, 24.0f,
1536  25.0f, 26.0f,
1537  27.0f, 28.0f,
1538  29.0f, 30.0f,
1539  31.0f, 32.0f
1540  },
1541  qScale, qOffset));
1542 
1543  return result;
1544 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim1Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2866 of file ConcatTestImpl.cpp.

2869 {
2870  return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2871 }

◆ Concat4dDim2Test()

LayerTestResult<float, 4> Concat4dDim2Test (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 2297 of file ConcatTestImpl.cpp.

2300 {
2301  return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2302 }

◆ Concat4dDim2TestImpl()

LayerTestResult<T, 4> Concat4dDim2TestImpl (IWorkloadFactory &workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 1547 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1552 {
1553  TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
1554 
1555  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1556  workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
1557 
1558  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1559  {
1560  1.0f, 2.0f,
1561  3.0f, 4.0f,
1562  11.0f, 12.0f,
1563  13.0f, 14.0f,
1564  21.0f, 22.0f,
1565  23.0f, 24.0f,
1566 
1567  5.0f, 6.0f,
1568  7.0f, 8.0f,
1569  15.0f, 16.0f,
1570  17.0f, 18.0f,
1571  25.0f, 26.0f,
1572  27.0f, 28.0f,
1573 
1574  9.0f, 10.0f,
1575  11.0f, 12.0f,
1576  19.0f, 20.0f,
1577  21.0f, 22.0f,
1578  29.0f, 30.0f,
1579  31.0f, 32.0f
1580  },
1581  qScale, qOffset));
1582 
1583  return result;
1584 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim2Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2873 of file ConcatTestImpl.cpp.

2876 {
2877  return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
2878 }

◆ Concat4dDim3Test()

LayerTestResult<float, 4> Concat4dDim3Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2304 of file ConcatTestImpl.cpp.

2308 {
2309  return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2310 }

◆ Concat4dDim3TestImpl()

LayerTestResult<T, 4> Concat4dDim3TestImpl ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
float  qScale,
int32_t  qOffset,
bool  useSubtensor 
)

Definition at line 1587 of file ConcatTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected.

1593 {
1594  TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
1595 
1596  LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1597  workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
1598 
1599  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1600  {
1601  1.0f, 2.0f,
1602  11.0f, 12.0f,
1603  21.0f, 22.0f,
1604  3.0f, 4.0f,
1605  13.0f, 14.0f,
1606  23.0f, 24.0f,
1607 
1608  5.0f, 6.0f,
1609  15.0f, 16.0f,
1610  25.0f, 26.0f,
1611  7.0f, 8.0f,
1612  17.0f, 18.0f,
1613  27.0f, 28.0f,
1614 
1615  9.0f, 10.0f,
1616  19.0f, 20.0f,
1617  29.0f, 30.0f,
1618  11.0f, 12.0f,
1619  21.0f, 22.0f,
1620  31.0f, 32.0f
1621  },
1622  qScale, qOffset));
1623 
1624  return result;
1625 }
boost::multi_array< T, n > outputExpected

◆ Concat4dDim3Uint8Test()

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 2880 of file ConcatTestImpl.cpp.

2883 {
2884  return Concat4dDim3TestImpl<DataType::QAsymmU8>(
2885  workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
2886 }

◆ Concat4dTestImpl()

LayerTestResult<T, 4> Concat4dTestImpl ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const TensorInfo & outputTensorInfo,
unsigned int  dimension,
bool  useSubtensor,
float  qScale,
int32_t  qOffset 
)

Definition at line 1404 of file ConcatTestImpl.cpp.

References TensorInfo::GetNumElements(), and LayerTestResult< T, n >::output.

1412 {
1413  TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1414 
1415  auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1416  {
1417  1.0f, 2.0f,
1418  3.0f, 4.0f,
1419  5.0f, 6.0f,
1420  7.0f, 8.0f,
1421  9.0f, 10.0f,
1422  11.0f, 12.0f
1423  },
1424  qScale, qOffset));
1425 
1426  auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1427  {
1428  11.0f, 12.0f,
1429  13.0f, 14.0f,
1430  15.0f, 16.0f,
1431  17.0f, 18.0f,
1432  19.0f, 20.0f,
1433  21.0f, 22.0f
1434  },
1435  qScale, qOffset));
1436 
1437  auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1438  {
1439  21.0f, 22.0f,
1440  23.0f, 24.0f,
1441  25.0f, 26.0f,
1442  27.0f, 28.0f,
1443  29.0f, 30.0f,
1444  31.0f, 32.0f
1445  },
1446  qScale, qOffset));
1447 
1448  LayerTestResult<T, 4> result(outputTensorInfo);
1449 
1450  std::vector<T> output;
1451  output.resize(outputTensorInfo.GetNumElements());
1452 
1453  Concatenate<T>(workloadFactory,
1454  memoryManager,
1455  {inputTensorInfo, inputTensorInfo, inputTensorInfo},
1456  {input0.data(), input1.data(), input2.data()},
1457  outputTensorInfo,
1458  output.data(),
1459  dimension,
1460  useSubtensor);
1461 
1462  result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1463  return result;
1464 }
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ ConcatBFloat16Test()

LayerTestResult<BFloat16, 3> ConcatBFloat16Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2350 of file ConcatTestImpl.cpp.

2353 {
2354  return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
2355 }

◆ ConcatDifferentInputOutputQParamTest()

LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

Definition at line 1921 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), armnn::CreateDescriptorForConcatenation(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and IWorkloadFactory::SupportsSubTensors().

1925 {
1926  IgnoreUnused(memoryManager);
1927 
1928  // Defines the tensor descriptors.
1929  TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1930  TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1931  TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
1932 
1933  std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
1934 
1935  // Quantized input1 tensor.
1936  const float inputScale1 = 0.5f;
1937  const int32_t inputOffset1 = 5;
1938 
1939  auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1940  {
1941  1, 2, 3,
1942  4, 5, 6,
1943  7, 8, 9,
1944  10, 11, 12,
1945  13, 14, 15,
1946  16, 17, 18,
1947 
1948  19, 20, 21,
1949  22, 23, 24,
1950  25, 26, 27,
1951  28, 29, 30,
1952  31, 32, 33,
1953  34, 35, 36
1954  }));
1955 
1956  // Quantized input2 tensor.
1957  const float inputScale2 = 0.2f;
1958  const int32_t inputOffset2 = 10;
1959 
1960  auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1961  {
1962  37, 38, 39,
1963  40, 41, 42,
1964  43, 44, 45,
1965  46, 47, 48,
1966  49, 50, 51,
1967  52, 53, 54
1968  }));
1969 
1970  // Quantized output tensor.
1971  const float outputScale = 0.1f;
1972  const int32_t outputOffset = 20;
1973 
1974  LayerTestResult<T, 3> ret(outputTensorInfo);
1975 
1976  ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1977  {
1978  0, 5, 74,
1979  10, 15, 76,
1980  20, 25, 78,
1981  30, 35, 80,
1982  40, 45, 82,
1983  50, 55, 84,
1984 
1985  60, 65, 86,
1986  70, 75, 88,
1987  80, 85, 90,
1988  90, 95, 92,
1989  100, 105, 94,
1990  110, 115, 96,
1991 
1992  120, 125, 98,
1993  130, 135, 100,
1994  140, 145, 102,
1995  150, 155, 104,
1996  160, 165, 106,
1997  170, 175, 108
1998  }));
1999 
2000  outputTensorInfo.SetQuantizationScale(outputScale);
2001  outputTensorInfo.SetQuantizationOffset(outputOffset);
2002  inputTensorInfo1.SetQuantizationScale(inputScale1);
2003  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2004  inputTensorInfo2.SetQuantizationScale(inputScale2);
2005  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2006 
2007  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2008  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2009 
2010  std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
2011  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2013  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2015  bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2017  std::unique_ptr<ITensorHandle> inputHandle1 =
2018  subTensorsSupported ?
2019  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2020  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2021 
2022  std::unique_ptr<ITensorHandle> inputHandle2 =
2023  subTensorsSupported ?
2024  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2025  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2027  ConcatQueueDescriptor data;
2029  inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2030  data.m_Parameters = desc;
2031 
2033  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2034  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2035  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2036 
2037  data.m_ViewOrigins.push_back(window1);
2038  data.m_ViewOrigins.push_back(window2);
2039 
2040  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2041 
2042  inputHandle1->Allocate();
2043  inputHandle2->Allocate();
2044  outputHandle->Allocate();
2045 
2046  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2047  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2048 
2049  workload->PostAllocationConfigure();
2050  workload->Execute();
2051 
2052  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2053 
2054  return ret;
2055 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
An OriginsDescriptor for the ConcatLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatDifferentInputOutputQParamTest< DataType::QAsymmU8 >()

template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3> ConcatDifferentInputOutputQParamTest< DataType::QAsymmU8 > ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ ConcatDifferentInputOutputQParamTest< DataType::QSymmS16 >()

template LayerTestResult<ResolveType<DataType::QSymmS16>, 3> ConcatDifferentInputOutputQParamTest< DataType::QSymmS16 > ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
bool  useSubtensor 
)

◆ Concatenate()

void Concatenate ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
std::initializer_list< const TensorInfo > inputTensorInfosOrig,
std::initializer_list< T *>  inputsOrig,
const TensorInfo & outputTensorInfoOrig,
T *  output,
unsigned int  concatDim,
bool  useSubtensor 
)

Definition at line 273 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), CreateDescriptorForConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), OriginsDescriptor::GetNumDimensions(), OriginsDescriptor::GetNumViews(), TensorInfo::GetShape(), OriginsDescriptor::GetViewOrigin(), NeedPermuteForConcat(), and IWorkloadFactory::SupportsSubTensors().

282 {
283  ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
284  if (output == nullptr)
285  {
286  // Nullptr is an error in the test. By returning without doing the permutation
287  // I expect the caller to fail the test. It still makes sense to report this as
288  // an assert for Debug builds.
289  return;
290  }
291 
292  // Saves a copy of the parameters which we might need to change.
293  std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
294  std::vector<T *> inputs = inputsOrig;
295  TensorInfo outputTensorInfo = outputTensorInfoOrig;
296 
297  PermutationVector permuteVector{0, 1, 2};
298 
299  // Holds and automatically releases memory for the reshaped input data.
300  std::vector<std::vector<T>> tmpInputDataStorage;
301 
302  const size_t inputCount = inputTensorInfos.size();
303 
304  bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
305 
306  if (needPermuteForConcat)
307  {
308  //
309  // We need to permute the inputs, because concatenation along
310  // the requested axis is not supported.
311  //
312  PermuteInputsForConcat<T>(workloadFactory,
313  memoryManager,
314  inputTensorInfos,
315  inputs,
316  tmpInputDataStorage,
317  permuteVector,
318  concatDim,
319  outputTensorInfo);
320  }
321 
322  WorkloadInfo workloadInfo;
323 
324  std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
325  inputHandles.reserve(inputCount);
327  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
329  ConcatQueueDescriptor queueDescriptor;
330  OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
331  queueDescriptor.m_Parameters = viewsDescriptor;
332 
333  if (useSubtensor)
334  {
335  queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
336  for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
337  {
338  queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
339  viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
340  }
342  outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
344  const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
345  for (unsigned int i = 0; i < inputCount; ++i)
346  {
347  const TensorInfo& inputTensorInfo = inputTensorInfos[i];
349  std::unique_ptr<ITensorHandle> inputHandle =
350  subTensorsSupported ?
351  workloadFactory.CreateSubTensorHandle(*outputHandle,
352  inputTensorInfo.GetShape(),
353  queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
354  workloadFactory.CreateTensorHandle(inputTensorInfo);
356  inputHandles.emplace_back(std::move(inputHandle));
357  }
358 
359 
360  }
361  else
362  {
363  for (unsigned int i = 0; i < inputCount; ++i)
364  {
366  std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
368  inputHandles.emplace_back(std::move(inputHandle));
369  }
370  }
371 
372  for (unsigned int i = 0; i < inputCount; ++i)
373  {
374  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
375  }
376 
377  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
378 
379  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
380 
381  for (auto& inputHandle : inputHandles)
382  {
383  inputHandle->Allocate();
384  }
385 
386  outputHandle->Allocate();
387 
388  unsigned int nextInputId = 0;
389  for (auto& inputHandle : inputHandles)
390  {
391  CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
392  ++nextInputId;
393  }
394 
395  workload->PostAllocationConfigure();
396  workload->Execute();
397 
398  if (needPermuteForConcat)
399  {
400  PermuteOutputForConcat<T>(workloadFactory,
401  memoryManager,
402  outputTensorInfo,
403  permuteVector,
404  std::move(outputHandle),
405  output);
406  }
407  else
408  {
409  CopyDataFromITensorHandle(output, outputHandle.get());
410  }
411 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
An OriginsDescriptor for the ConcatLayer.
OriginsDescriptor CreateDescriptorForConcat(const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
uint32_t GetNumDimensions() const
Get the number of dimensions.
Contains information about inputs and outputs to a layer.
uint32_t GetNumViews() const
Get the number of views.
bool NeedPermuteForConcat(const std::vector< TensorInfo > &inputTensorInfos, unsigned int concatDim)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatFloat16Test()

LayerTestResult<Half, 3> ConcatFloat16Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2343 of file ConcatTestImpl.cpp.

2346 {
2347  return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
2348 }

◆ ConcatTest()

LayerTestResult<float,3> ConcatTest ( IWorkloadFactory & workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr & memoryManager 
)

Definition at line 2077 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), armnn::info, and IWorkloadFactory::SupportsSubTensors().

2080 {
2081  IgnoreUnused(memoryManager);
2082 
2083  unsigned int outputWidth = 3;
2084  unsigned int outputHeight = 6;
2085  unsigned int outputChannels = 3;
2086 
2087  unsigned int inputWidth1 = 3;
2088  unsigned int inputHeight1 = 6;
2089  unsigned int inputChannels1 = 2;
2090 
2091  unsigned int inputWidth2 = 3;
2092  unsigned int inputHeight2 = 6;
2093  unsigned int inputChannels2 = 1;
2094 
2095  // Define the tensor descriptors.
2096  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2097  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2098  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
2099 
2100  LayerTestResult<float,3> ret(outputTensorInfo);
2101 
2102  ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2103  {
2104  1.0f, 2.0f, 3.0f,
2105  4.0f, 5.0f, 6.0f,
2106  7.0f, 8.0f, 9.0f,
2107  10.0f, 11.0f, 12.0f,
2108  13.0f, 14.0f, 15.0f,
2109  16.0f, 17.0f, 18.0f,
2110 
2111  19.0f, 20.0f, 21.0f,
2112  22.0f, 23.0f, 24.0f,
2113  25.0f, 26.0f, 27.0f,
2114  28.0f, 29.0f, 30.0f,
2115  31.0f, 32.0f, 33.0f,
2116  34.0f, 35.0f, 36.0f,
2117 
2118  37.0f, 38.0f, 39.0f,
2119  40.0f, 41.0f, 42.0f,
2120  43.0f, 44.0f, 45.0f,
2121  46.0f, 47.0f, 48.0f,
2122  49.0f, 50.0f, 51.0f,
2123  52.0f, 53.0f, 54.0f,
2124  })
2125  );
2126 
2127  auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2128  {
2129  1.0f, 2.0f, 3.0f,
2130  4.0f, 5.0f, 6.0f,
2131  7.0f, 8.0f, 9.0f,
2132  10.0f, 11.0f, 12.0f,
2133  13.0f, 14.0f, 15.0f,
2134  16.0f, 17.0f, 18.0f,
2135 
2136  19.0f, 20.0f, 21.0f,
2137  22.0f, 23.0f, 24.0f,
2138  25.0f, 26.0f, 27.0f,
2139  28.0f, 29.0f, 30.0f,
2140  31.0f, 32.0f, 33.0f,
2141  34.0f, 35.0f, 36.0f,
2142  })
2143  );
2144 
2145  auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2146  {
2147  37.0f, 38.0f, 39.0f,
2148  40.0f, 41.0f, 42.0f,
2149  43.0f, 44.0f, 45.0f,
2150  46.0f, 47.0f, 48.0f,
2151  49.0f, 50.0f, 51.0f,
2152  52.0f, 53.0f, 54.0f,
2153  })
2154  );
2155 
2156  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
2157  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2158 
2159  std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
2160  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2162  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2163 
2164  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2165 
2166  std::unique_ptr<ITensorHandle> inputHandle1 =
2167  subTensorsSupported ?
2168  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2169  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2170 
2171  std::unique_ptr<ITensorHandle> inputHandle2 =
2172  subTensorsSupported ?
2173  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2174  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2176  ConcatQueueDescriptor data;
2178  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2179  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2180  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2181 
2182  data.m_ViewOrigins.push_back(window1);
2183  data.m_ViewOrigins.push_back(window2);
2184 
2185  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2186 
2187  inputHandle1->Allocate();
2188  inputHandle2->Allocate();
2189  outputHandle->Allocate();
2190 
2191  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2192  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2193 
2194  workload->PostAllocationConfigure();
2195  workload->Execute();
2196 
2197  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2198 
2199  return ret;
2200 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint16Test()

LayerTestResult<uint16_t, 3> ConcatUint16Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2640 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QSymmS16, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2643 {
2644  IgnoreUnused(memoryManager);
2645 
2646  unsigned int outputWidth = 3;
2647  unsigned int outputHeight = 6;
2648  unsigned int outputChannels = 3;
2649 
2650  unsigned int inputWidth1 = 3;
2651  unsigned int inputHeight1 = 6;
2652  unsigned int inputChannels1 = 2;
2653 
2654  unsigned int inputWidth2 = 3;
2655  unsigned int inputHeight2 = 6;
2656  unsigned int inputChannels2 = 1;
2657 
2658  // Defines the tensor descriptors.
2659  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2660  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2661  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
2662 
2663  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2664  const float scale = 0.13497836f;
2665  const int32_t offset = -7;
2666 
2667  outputTensorInfo.SetQuantizationScale(scale);
2668  outputTensorInfo.SetQuantizationOffset(offset);
2669  inputTensorInfo1.SetQuantizationScale(scale);
2670  inputTensorInfo1.SetQuantizationOffset(offset);
2671  inputTensorInfo2.SetQuantizationScale(scale);
2672  inputTensorInfo2.SetQuantizationOffset(offset);
2673 
2674  LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2675 
2676  ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2677  {
2678  1, 2, 3,
2679  4, 5, 6,
2680  7, 8, 9,
2681  10, 11, 12,
2682  13, 14, 15,
2683  16, 17, 18,
2684 
2685  19, 20, 21,
2686  22, 23, 24,
2687  25, 26, 27,
2688  28, 29, 30,
2689  31, 32, 33,
2690  34, 35, 36,
2691 
2692  37, 38, 39,
2693  40, 41, 42,
2694  43, 44, 45,
2695  46, 47, 48,
2696  49, 50, 51,
2697  52, 53, 54,
2698  }));
2699 
2700  auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2701  {
2702  1, 2, 3,
2703  4, 5, 6,
2704  7, 8, 9,
2705  10, 11, 12,
2706  13, 14, 15,
2707  16, 17, 18,
2708 
2709  19, 20, 21,
2710  22, 23, 24,
2711  25, 26, 27,
2712  28, 29, 30,
2713  31, 32, 33,
2714  34, 35, 36,
2715  }));
2716 
2717  auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2718  {
2719  37, 38, 39,
2720  40, 41, 42,
2721  43, 44, 45,
2722  46, 47, 48,
2723  49, 50, 51,
2724  52, 53, 54,
2725  }));
2726 
2727  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2728  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2729 
2730  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2731  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2732 
2734  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2735 
2736  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2737 
2738  std::unique_ptr<ITensorHandle> inputHandle1 =
2739  subTensorsSupported ?
2740  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2741  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2742 
2743  std::unique_ptr<ITensorHandle> inputHandle2 =
2744  subTensorsSupported ?
2745  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2746  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2748 
2749  ConcatQueueDescriptor data;
2751  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2752  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2753  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2754 
2755  data.m_ViewOrigins.push_back(window1);
2756  data.m_ViewOrigins.push_back(window2);
2757 
2758  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2759 
2760  inputHandle1->Allocate();
2761  inputHandle2->Allocate();
2762  outputHandle->Allocate();
2763 
2764  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2765  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2766 
2767  workload->PostAllocationConfigure();
2768  workload->Execute();
2769 
2770  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2771 
2772  return ret;
2773 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8DifferentQParamsTest()

LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2357 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, and IWorkloadFactory::SupportsSubTensors().

2360 {
2361  IgnoreUnused(memoryManager);
2362 
2363  unsigned int outputWidth = 3;
2364  unsigned int outputHeight = 6;
2365  unsigned int outputChannels = 3;
2366 
2367  unsigned int inputWidth1 = 3;
2368  unsigned int inputHeight1 = 6;
2369  unsigned int inputChannels1 = 2;
2370 
2371  unsigned int inputWidth2 = 3;
2372  unsigned int inputHeight2 = 6;
2373  unsigned int inputChannels2 = 1;
2374 
2375  // Defines the tensor descriptors.
2376  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2377  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2378  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2379 
2380  // Quantized input1 tensor. Range [-3, 1]
2381  const float inputScale1 = 0.015686f;
2382  const int32_t inputOffset1 = 192;
2383 
2384  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2385  {
2386  1, 2, 3,
2387  4, 5, 6,
2388  7, 8, 9,
2389  10, 11, 12,
2390  13, 14, 15,
2391  16, 17, 18,
2392 
2393  19, 20, 21,
2394  22, 23, 24,
2395  25, 26, 27,
2396  28, 29, 30,
2397  31, 32, 33,
2398  34, 35, 36,
2399  })
2400  );
2401 
2402  // Quantized input2 tensor. Range [-1, 4]
2403  const float inputScale2 = 0.019608f;
2404  const int32_t inputOffset2 = 50;
2405 
2406  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2407  {
2408  37, 38, 39,
2409  40, 41, 42,
2410  43, 44, 45,
2411  46, 47, 48,
2412  49, 50, 51,
2413  52, 53, 54,
2414  })
2415  );
2416 
2417  // Output has the same quantization parameters as input1,
2418  // so that only the requantization of input2 is required
2419  const float outputScale = 0.015686f;
2420  const int32_t outputOffset = 192;
2421 
2422  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2423 
2424  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2425  {
2426  1, 2, 3,
2427  4, 5, 6,
2428  7, 8, 9,
2429  10, 11, 12,
2430  13, 14, 15,
2431  16, 17, 18,
2432 
2433  19, 20, 21,
2434  22, 23, 24,
2435  25, 26, 27,
2436  28, 29, 30,
2437  31, 32, 33,
2438  34, 35, 36,
2439 
2440  176, 177, 178,
2441  179, 181, 182,
2442  183, 184, 186,
2443  187, 188, 189,
2444  191, 192, 193,
2445  195, 196, 197,
2446  })
2447  );
2448 
2449  outputTensorInfo.SetQuantizationScale(outputScale);
2450  outputTensorInfo.SetQuantizationOffset(outputOffset);
2451  inputTensorInfo1.SetQuantizationScale(inputScale1);
2452  inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2453  inputTensorInfo2.SetQuantizationScale(inputScale2);
2454  inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2455 
2456  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2457  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2458 
2459  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2460  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2462  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2463 
2464  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2465 
2466  std::unique_ptr<ITensorHandle> inputHandle1 =
2467  subTensorsSupported ?
2468  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2469  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2470 
2471  std::unique_ptr<ITensorHandle> inputHandle2 =
2472  subTensorsSupported ?
2473  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2474  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2476  ConcatQueueDescriptor data;
2478  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2479  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2480  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2481 
2482  data.m_ViewOrigins.push_back(window1);
2483  data.m_ViewOrigins.push_back(window2);
2484 
2485  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2486 
2487  inputHandle1->Allocate();
2488  inputHandle2->Allocate();
2489  outputHandle->Allocate();
2490 
2491  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2492  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2493 
2494  workload->PostAllocationConfigure();
2495  workload->Execute();
2496 
2497  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2498 
2499  return ret;
2500 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ ConcatUint8Test()

LayerTestResult<uint8_t, 3> ConcatUint8Test ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 2502 of file ConcatTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, armnn::QAsymmU8, TensorInfo::SetQuantizationScale(), and IWorkloadFactory::SupportsSubTensors().

2505 {
2506  IgnoreUnused(memoryManager);
2507 
2508  unsigned int outputWidth = 3;
2509  unsigned int outputHeight = 6;
2510  unsigned int outputChannels = 3;
2511 
2512  unsigned int inputWidth1 = 3;
2513  unsigned int inputHeight1 = 6;
2514  unsigned int inputChannels1 = 2;
2515 
2516  unsigned int inputWidth2 = 3;
2517  unsigned int inputHeight2 = 6;
2518  unsigned int inputChannels2 = 1;
2519 
2520  // Defines the tensor descriptors.
2521  TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2522  TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2523  TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
2524 
2525  // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2526  const float scale = 0.13497836f;
2527  const int32_t offset = -7;
2528 
2529  outputTensorInfo.SetQuantizationScale(scale);
2530  outputTensorInfo.SetQuantizationOffset(offset);
2531  inputTensorInfo1.SetQuantizationScale(scale);
2532  inputTensorInfo1.SetQuantizationOffset(offset);
2533  inputTensorInfo2.SetQuantizationScale(scale);
2534  inputTensorInfo2.SetQuantizationOffset(offset);
2535 
2536  LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2537 
2538  ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2539  {
2540  1, 2, 3,
2541  4, 5, 6,
2542  7, 8, 9,
2543  10, 11, 12,
2544  13, 14, 15,
2545  16, 17, 18,
2546 
2547  19, 20, 21,
2548  22, 23, 24,
2549  25, 26, 27,
2550  28, 29, 30,
2551  31, 32, 33,
2552  34, 35, 36,
2553 
2554  37, 38, 39,
2555  40, 41, 42,
2556  43, 44, 45,
2557  46, 47, 48,
2558  49, 50, 51,
2559  52, 53, 54,
2560  })
2561  );
2562 
2563  auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2564  {
2565  1, 2, 3,
2566  4, 5, 6,
2567  7, 8, 9,
2568  10, 11, 12,
2569  13, 14, 15,
2570  16, 17, 18,
2571 
2572  19, 20, 21,
2573  22, 23, 24,
2574  25, 26, 27,
2575  28, 29, 30,
2576  31, 32, 33,
2577  34, 35, 36,
2578  })
2579  );
2580 
2581  auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2582  {
2583  37, 38, 39,
2584  40, 41, 42,
2585  43, 44, 45,
2586  46, 47, 48,
2587  49, 50, 51,
2588  52, 53, 54,
2589  })
2590  );
2591 
2592  std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
2593  ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2594 
2595  std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
2596  ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2597 
2599  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2600 
2601  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2602 
2603  std::unique_ptr<ITensorHandle> inputHandle1 =
2604  subTensorsSupported ?
2605  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2606  workloadFactory.CreateTensorHandle(inputTensorInfo1);
2607 
2608  std::unique_ptr<ITensorHandle> inputHandle2 =
2609  subTensorsSupported ?
2610  workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2611  workloadFactory.CreateTensorHandle(inputTensorInfo2);
2613 
2614  ConcatQueueDescriptor data;
2616  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2617  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2618  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2619 
2620  data.m_ViewOrigins.push_back(window1);
2621  data.m_ViewOrigins.push_back(window2);
2622 
2623  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
2624 
2625  inputHandle1->Allocate();
2626  inputHandle2->Allocate();
2627  outputHandle->Allocate();
2628 
2629  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2630  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2631 
2632  workload->PostAllocationConfigure();
2633  workload->Execute();
2634 
2635  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2636 
2637  return ret;
2638 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0

◆ CreateDescriptorForConcat()

OriginsDescriptor CreateDescriptorForConcat ( const std::vector< TensorInfo > &  inputTensorInfos,
unsigned int  concatDim 
)

Definition at line 26 of file ConcatTestImpl.cpp.

References armnn::CreateDescriptorForConcatenation().

Referenced by Concatenate().

29 {
30  std::vector<TensorShape> shapes;
31  shapes.reserve(inputTensorInfos.size());
32  for (const TensorInfo& it: inputTensorInfos)
33  {
34  shapes.push_back(it.GetShape());
35  }
36 
37  return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
38 }
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...

◆ ExpandTensorShapeTo3dForPermute()

TensorShape ExpandTensorShapeTo3dForPermute ( const TensorShape inputShape)

Definition at line 72 of file ConcatTestImpl.cpp.

References TensorShape::GetNumDimensions().

Referenced by PermuteInputsForConcat().

73 {
74  unsigned int numDims = inputShape.GetNumDimensions();
75  if (numDims >= 3)
76  {
77  // Nothing to do if the inputShape has at least 3 dimensions.
78  return inputShape;
79  }
80 
81  std::vector<unsigned int> newDims(size_t(3), 1u);
82  unsigned int expandedBy = 3 - numDims;
83  for (unsigned int i=0; i<numDims; ++i)
84  {
85  newDims[expandedBy+i] = inputShape[i];
86  }
87  return TensorShape(3u, &newDims[0]);
88 }
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:175

◆ Generate3dPermuteVectorForConcat()

void Generate3dPermuteVectorForConcat ( unsigned int  numDimensions,
unsigned int &  concatDim,
std::pair< PermutationVector, PermutationVector > &  permutations 
)

Definition at line 90 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT, and ARMNN_ASSERT_MSG.

Referenced by PermuteInputsForConcat().

94 {
95  ARMNN_ASSERT_MSG(numDimensions <= 3,
96  "Only dimensions 1,2 and 3 are supported by this helper");
97  unsigned int expandedBy = 3 - numDimensions;
98  unsigned int expandedConcatAxis = concatDim + expandedBy;
99 
100  if (expandedConcatAxis == 2)
101  {
102  concatDim = 0;
103  PermutationVector forwardPermutation({1, 2, 0});
104  PermutationVector reversePermutation({2, 0, 1});
105  permutations = std::make_pair(forwardPermutation, reversePermutation);
106  }
107  else if (expandedConcatAxis == 1)
108  {
109  concatDim = 0;
110  PermutationVector forwardPermutation({2, 0, 1});
111  PermutationVector reversePermutation({1, 2, 0});
112  permutations = std::make_pair(forwardPermutation, reversePermutation);
113  }
114  else
115  {
116  ARMNN_ASSERT(expandedConcatAxis == 0);
117  concatDim = 0;
118  }
119 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ NeedPermuteForConcat()

bool NeedPermuteForConcat ( const std::vector< TensorInfo > &  inputTensorInfos,
unsigned int  concatDim 
)

Definition at line 46 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG.

Referenced by Concatenate().

49 {
50  // See note above. Additionally we expect the input shapes to have the
51  // same number of dimensions.
52  unsigned int nDimensions = 0;
53 
54  // Determine the number of dimensions as well as sanity check them
55  // against test implementation issues.
56  for (auto && tensorInfo : inputTensorInfos)
57  {
58  if (!nDimensions)
59  {
60  nDimensions = tensorInfo.GetShape().GetNumDimensions();
61  }
62  else
63  {
64  ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
65  "Input shapes must have the same number of dimensions");
66  }
67  }
68 
69  return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
70 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15

◆ PermuteInputsForConcat()

void PermuteInputsForConcat ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
std::vector< TensorInfo > &  inputTensorInfos,
std::vector< T *> &  inputData,
std::vector< std::vector< T >> &  inputDataStorage,
PermutationVector permuteVector,
unsigned int &  concatDim,
TensorInfo outputTensorInfo 
)

Definition at line 172 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, ExpandTensorShapeTo3dForPermute(), Generate3dPermuteVectorForConcat(), TensorInfo::GetShape(), armnn::IgnoreUnused(), PermutationVector::IsEqual(), armnnUtils::Permuted(), and TensorInfo::SetShape().

181 {
182  IgnoreUnused(memoryManager);
183  ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
184  "Expecting more than one tensor to be concatenated here");
185 
186  unsigned int numDims = 0;
187  unsigned int nthInput = 0;
188  const PermutationVector identity({0, 1, 2});
189 
190  std::pair<PermutationVector, PermutationVector> permutations =
191  std::make_pair(identity, identity);
192 
193  inputDataStorage.resize(inputData.size());
194 
195  for (auto && tensorInfo : inputTensorInfos)
196  {
197  if (numDims == 0)
198  {
199  numDims = tensorInfo.GetShape().GetNumDimensions();
200  Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
201 
202  // Store the reverse permutation.
203  permuteVector = permutations.second;
204  ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
205  "Test logic error, we don't need permutation, so we shouldn't arrive here");
206  }
207  else
208  {
209  ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
210  "All inputs must have the same number of dimensions");
211  }
212 
213  TensorInfo newTensorInfo = tensorInfo;
214  newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
215 
216  PermuteTensorData<T>(workloadFactory,
217  memoryManager,
218  permutations.first,
219  newTensorInfo,
220  inputData[nthInput],
221  inputDataStorage[nthInput]);
222 
223  inputData[nthInput] = inputDataStorage[nthInput].data();
224  inputTensorInfos[nthInput] = newTensorInfo;
225 
226  ++nthInput;
227  }
228 
229  outputTensorInfo.SetShape(
231  ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
232  permutations.first));
233 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void IgnoreUnused(Ts &&...)
void Generate3dPermuteVectorForConcat(unsigned int numDimensions, unsigned int &concatDim, std::pair< PermutationVector, PermutationVector > &permutations)
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape &inputShape)
bool IsEqual(const PermutationVector &other) const
Definition: Types.hpp:230
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98

◆ PermuteOutputForConcat()

void PermuteOutputForConcat ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const TensorInfo tensorInfo,
const PermutationVector permuteVector,
std::unique_ptr< ITensorHandle > &&  inputDataHandle,
T *  data 
)

Definition at line 240 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, CopyDataFromITensorHandle(), and TensorInfo::GetNumElements().

247 {
248  ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
249  if (data == nullptr)
250  {
251  // Nullptr is an error in the test. By returning without doing the permutation
252  // I expect the caller to fail the test. It still makes sense to report this as
253  // an assert for Debug builds.
254  return;
255  }
256 
257  TensorInfo resultTensorInfo = tensorInfo;
258  std::vector<T> inputData(tensorInfo.GetNumElements());
259  std::vector<T> outputData;
260 
261  CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
262 
263  PermuteTensorData<T>(workloadFactory,
264  memoryManager,
265  permuteVector,
266  resultTensorInfo,
267  &inputData[0],
268  outputData);
269 
270  ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
271 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ PermuteTensorData()

void PermuteTensorData ( IWorkloadFactory workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const PermutationVector mappings,
TensorInfo inputTensorInfo,
const T *  inputData,
std::vector< T > &  outputData 
)

Definition at line 121 of file ConcatTestImpl.cpp.

References ARMNN_ASSERT_MSG, ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreatePermute(), IWorkloadFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), armnn::IgnoreUnused(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and armnnUtils::Permuted().

128 {
129  IgnoreUnused(memoryManager);
130  ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
131  if (inputData == nullptr)
132  {
133  // Nullptr is an error in the test. By returning without doing the permutation
134  // I expect the caller to fail the test. It still makes sense to report this as
135  // an assert for Debug builds.
136  return;
137  }
138 
139  TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
141  std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
142  std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
144 
145  PermuteQueueDescriptor queueDescriptor;
146  queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
147  WorkloadInfo workloadInfo;
148  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
149  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
150 
151  std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
152 
153  inputHandle->Allocate();
154  outputHandle->Allocate();
155 
156  CopyDataToITensorHandle(inputHandle.get(), inputData);
157 
158  workload->PostAllocationConfigure();
159  workload->Execute();
160 
161  outputData.resize(outputTensorInfo.GetNumElements());
162  CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
163  inputTensorInfo = outputTensorInfo;
164 }
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
virtual std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98
unsigned int GetNumElements() const
Definition: Tensor.hpp:192
A PermuteDescriptor for the PermuteLayer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)