19 template<armnn::DataType ArmnnType,
typename T>
26 const float customPaddingValue)
33 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
35 std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
44 auto p = customPaddingValue;
45 std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
57 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
60 result.
outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
62 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
63 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
68 std::vector<std::pair<unsigned int, unsigned int>> padList;
69 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
70 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
76 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
77 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
79 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePad(descriptor, info);
81 inputHandle->Allocate();
82 outputHandle->Allocate();
86 workload->PostAllocationConfigure();
94 template<armnn::DataType ArmnnType,
typename T>
107 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
109 std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
121 std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
143 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
146 result.
outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
148 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
149 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
154 std::vector<std::pair<unsigned int, unsigned int>> PadList;
155 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
156 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
157 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
162 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
163 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
165 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePad(descriptor, info);
167 inputHandle->Allocate();
168 outputHandle->Allocate();
172 workload->PostAllocationConfigure();
180 template<armnn::DataType ArmnnType,
typename T>
193 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
195 std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
219 std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
383 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
386 result.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
388 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
389 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
393 std::vector<std::pair<unsigned int, unsigned int>> PadList;
394 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
395 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
396 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
397 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
402 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
403 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
405 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePad(descriptor, info);
407 inputHandle->Allocate();
408 outputHandle->Allocate();
412 workload->PostAllocationConfigure();
420 template<armnn::DataType ArmnnType,
typename T>
427 const float customPaddingValue)
434 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
436 std::vector<T> inputValues =
444 T p =
static_cast<T
>(customPaddingValue);
445 std::vector<T> expectedOutputValues =
456 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
459 result.
outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
461 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
462 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
467 std::vector<std::pair<unsigned int, unsigned int>> padList;
468 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
469 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
475 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
476 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
478 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePad(descriptor, info);
480 inputHandle->Allocate();
481 outputHandle->Allocate();
485 workload->PostAllocationConfigure();
498 Pad2dTestCommon<armnn::DataType::QSymmS16>(
504 const float customPaddingValue);
507 Pad3dTestCommon<armnn::DataType::QSymmS16>(
514 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
515 Pad4dTestCommon<armnn::DataType::QSymmS16>(
523 PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
529 const float customPaddingValue);
532 PadQAsymmTestCommon<armnn::DataType::QAsymmU8>(
538 const float customPaddingValue);
549 return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
557 return Pad2dTestCommon<armnn::DataType::QAsymmU8>(
558 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
566 return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
574 return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
582 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
590 return Pad2dTestCommon<armnn::DataType::Float32>(
591 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
599 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
607 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
615 return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
623 return Pad2dTestCommon<armnn::DataType::BFloat16>(
624 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
632 return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
640 return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
648 return Pad2dTestCommon<armnn::DataType::QSymmS8>(
649 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
657 return Pad2dTestCommon<armnn::DataType::QSymmS8>(
658 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
666 return Pad3dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
674 return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
682 return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
683 workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 2);
691 return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
692 workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 3, 1.0f);
LayerTestResult< T, 2 > Pad2dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, const float customPaddingValue)
LayerTestResult< int8_t, 4 > PadInt84dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int8_t, 2 > PadInt82dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_PadValue
Optional value to use for padding; defaults to 0.
LayerTestResult< armnn::BFloat16, 2 > PadBFloat162dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > outputExpected
LayerTestResult< armnn::BFloat16, 4 > PadBFloat164dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 2 > PadQAsymmTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, const float customPaddingValue)
LayerTestResult< float, 2 > PadFloat322dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > Pad4dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for each input dimension.
void IgnoreUnused(Ts &&...)
LayerTestResult< armnn::BFloat16, 2 > PadBFloat162dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
LayerTestResult< uint8_t, 4 > PadUint84dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 3 > PadUint83dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 3 > Pad3dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int8_t, 3 > PadInt83dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 2 > PadUint82dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
LayerTestResult< float, 4 > PadFloat324dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 2 > PadUint82dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > output
LayerTestResult< int8_t, 2 > PadInt8CustomPaddingAsymmTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< armnn::BFloat16, 3 > PadBFloat163dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 2 > PadFloat322dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 3 > PadFloat323dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
LayerTestResult< int8_t, 2 > PadInt8AsymmTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int8_t, 2 > PadInt82dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)