23 #include <doctest/doctest.h> 33 using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
// Sanity-checks that a TensorData's backing vector holds at least as many
// elements as its TensorInfo declares; on mismatch it builds an error message
// (the statement head that raises it is elided in this extraction) that
// embeds `tensorName`, the declared element count, and the actual size.
// NOTE(review): source lines 37, 39-40 and 42-44 are missing here —
// this is an incomplete fragment kept verbatim; leading integers are
// original source line numbers fused in by the extraction.
36 void VerifyInputTensorData(
const TensorData<T>& data,
const std::string& tensorName)
38 if (data.first.GetNumElements() > data.second.size())
41 std::to_string(data.first.GetNumElements()) +
"but got " + std::to_string(data.second.size()));
// Workload driver shared by all TransposeConvolution2d tests: validates the
// input/weights/(optional) biases TensorData, wires weight and bias
// ScopedTensorHandles into the queue descriptor, creates input/output tensor
// handles via `tensorHandleFactory`, executes the workload, and reads the
// result back into `output.second`.
// NOTE(review): many interior source lines are elided by this extraction
// (signature head, descriptor construction, workload creation arguments,
// copy-to/from-handle calls); fragments below are kept verbatim.
45 template<
typename T,
typename BT>
50 const TensorData<T>& input,
51 TensorData<T>& output,
52 const TensorData<T>& weights,
56 using namespace armnn;
58 VerifyInputTensorData(input,
"input");
// NOTE(review): the weights tensor is verified with tensorName "biases" —
// presumably a typo for "weights"; only the error-message text is affected,
// not the check itself. Confirm against the upstream source before fixing.
59 VerifyInputTensorData(weights,
"biases");
63 if (!biases.has_value())
67 VerifyInputTensorData(biases.value(),
"biases");
75 queueDescriptor.
m_Weight = &weightsTensor;
// Bias handle is optional: left null unless `biases` carries a value
// (the guarding condition around line 83 is elided here).
79 std::unique_ptr<ScopedTensorHandle> biasesTensor;
83 biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
84 queueDescriptor.
m_Bias = biasesTensor.get();
90 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(input.first);
91 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(output.first);
95 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
96 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
// Workload construction arguments (original lines 99-100) are elided.
98 std::unique_ptr<armnn::IWorkload> workload =
101 inputHandle->Allocate();
102 outputHandle->Allocate();
106 ExecuteWorkload(*workload, memoryManager);
// Pre-size the output buffer with value-initialized elements; the
// copy-back from outputHandle (elided) fills it afterwards.
109 output.second = std::vector<T>(output.first.GetNumElements(), T());
// Quantization-aware front end: converts float input/weights/biases vectors
// into TensorData<T>/TensorData<BT> via armnnUtils::QuantizedVector, runs
// TransposeConvolution2dTestImpl, and fills a LayerTestResult with the actual
// output alongside the quantized expected output.
// NOTE(review): signature lines and most call-argument lines are elided by
// this extraction; fragments below are kept verbatim.
113 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
120 const std::vector<float>& inputData,
122 const std::vector<float>& expectedOutputData,
124 const std::vector<float>& weightsData,
126 const std::vector<float>& biasesData)
128 using namespace armnn;
// Fixed quantization parameters for quantized T; the body that applies
// qScale/qOffset to the tensor infos (lines 135-149) is elided.
131 if (armnn::IsQuantizedType<T>())
133 constexpr
float qScale = 0.50f;
134 constexpr int32_t qOffset = 10;
150 TensorData<T> input =
157 TensorData<T> weights =
160 armnnUtils::QuantizedVector<T>(weightsData,
170 TensorData<BT> biases =
173 armnnUtils::QuantizedVector<BT>(biasesData,
// Output buffer starts empty; TransposeConvolution2dTestImpl fills it.
182 TensorData<T> output = { outputInfo, {} };
185 TransposeConvolution2dTestImpl(workloadFactory,
196 testResult.m_ActualData = output.second;
197 testResult.m_ExpectedData = armnnUtils::QuantizedVector<T>(expectedOutputData,
// In-place NCHW -> NHWC permutation of the three (TensorInfo, data) pairs
// used by a test: input, expected output, and weights. Invoked by the
// layout-parameterised tests before running with DataLayout::NHWC.
// NOTE(review): the function header line is elided by this extraction —
// presumably template<typename T> void SwizzleData(TensorInfo&, ...),
// per the call sites elsewhere in this file.
206 std::vector<T>& inputData,
208 std::vector<T>& outputData,
210 std::vector<T>& weightsData)
212 PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
213 PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
214 PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
// Simple test case: 1x1x3x3 input, 3x3 kernel, stride 1, producing a
// 1x1x5x5 output, with an optional bias of 1.f. Expected output values are
// hard-coded below for the no-bias case and shifted by the bias via
// std::transform when enabled.
// NOTE(review): interior lines (descriptor construction, input/weights data
// literals, guard conditions) are elided by this extraction.
219 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
227 using namespace armnn;
229 constexpr
unsigned int batches = 1u;
230 constexpr
unsigned int channels = 1u;
232 constexpr
unsigned int wInput = 3u;
233 constexpr
unsigned int hInput = wInput;
235 constexpr
unsigned int wOutput = 5u;
236 constexpr
unsigned int hOutput = wOutput;
238 constexpr
unsigned int wWeights = 3u;
239 constexpr
unsigned int hWeights = wWeights;
// Shapes are declared in NCHW; SwizzleData converts when layout is NHWC.
241 TensorShape inputShape = { batches, channels, hInput, wInput };
242 TensorShape outputShape = { batches, channels, hOutput, wOutput };
243 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
246 TensorInfo outputInfo(outputShape, ArmnnType);
247 TensorInfo weightsInfo(weightsShape, ArmnnType);
248 TensorInfo biasesInfo({ channels }, ArmnnBType);
250 std::vector<float> inputData =
257 std::vector<float> weightsData =
264 std::vector<float> biasesData = { 1.f };
266 std::vector<float> expectedOutputData =
268 1.f, 3.f, 6.f, 5.f, 3.f,
269 5.f, 12.f, 21.f, 16.f, 9.f,
270 12.f, 27.f, 45.f, 33.f, 18.f,
271 11.f, 24.f, 39.f, 28.f, 15.f,
272 7.f, 15.f, 24.f, 17.f, 9.f
// Adds the bias to every expected value; presumably guarded by
// if (biasEnabled) on an elided line — confirm against upstream.
278 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
279 [&](
float f) ->
float {
return f + biasesData[0]; });
284 descriptor.m_StrideY = 1;
285 descriptor.m_BiasEnabled = biasEnabled;
286 descriptor.m_DataLayout = layout;
// Presumably only executed for NHWC layout (guard elided) — verify.
291 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
294 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// Padded test case: 1x1x4x4 input, 3x3 kernel, stride 1 and padding of 2 on
// each visible side (right/top/bottom here; left presumably too, on an
// elided line), producing a 1x1x2x2 output, with an optional bias of 1.f.
// NOTE(review): interior lines (data literals, expected output values,
// remaining descriptor fields, guard conditions) are elided.
308 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
316 using namespace armnn;
318 constexpr
unsigned int batches = 1u;
319 constexpr
unsigned int channels = 1u;
321 constexpr
unsigned int wInput = 4u;
322 constexpr
unsigned int hInput = wInput;
324 constexpr
unsigned int wOutput = 2u;
325 constexpr
unsigned int hOutput = wOutput;
327 constexpr
unsigned int wWeights = 3u;
328 constexpr
unsigned int hWeights = wWeights;
// Shapes are declared in NCHW; SwizzleData converts when layout is NHWC.
330 TensorShape inputShape = { batches, channels, hInput, wInput };
331 TensorShape outputShape = { batches, channels, hOutput, wOutput };
332 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
335 TensorInfo outputInfo(outputShape, ArmnnType);
336 TensorInfo weightsInfo(weightsShape, ArmnnType);
337 TensorInfo biasesInfo({ channels }, ArmnnBType);
339 std::vector<float> inputData =
347 std::vector<float> weightsData =
354 std::vector<float> biasesData = { 1.f };
356 std::vector<float> expectedOutputData =
// Adds the bias to every expected value; presumably guarded by
// if (biasEnabled) on an elided line — confirm against upstream.
365 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
366 [&](
float f) ->
float {
return f + biasesData[0]; });
371 descriptor.m_PadRight = 2;
372 descriptor.m_PadTop = 2;
373 descriptor.m_PadBottom = 2;
374 descriptor.m_StrideX = 1;
375 descriptor.m_StrideY = 1;
376 descriptor.m_BiasEnabled = biasEnabled;
377 descriptor.m_DataLayout = layout;
// Presumably only executed for NHWC layout (guard elided) — verify.
382 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
385 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// Strided test case: 1x1x3x3 input, 3x3 kernel, stride 2 (Y visible below;
// X presumably set on an elided line), producing a 1x1x7x7 output, with an
// optional bias of 1.f. Expected output values are hard-coded for the
// no-bias case and shifted by the bias via std::transform when enabled.
// NOTE(review): interior lines (data literals, remaining descriptor fields,
// guard conditions) are elided by this extraction.
399 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
407 using namespace armnn;
409 constexpr
unsigned int batches = 1u;
410 constexpr
unsigned int channels = 1u;
412 constexpr
unsigned int wInput = 3u;
413 constexpr
unsigned int hInput = wInput;
415 constexpr
unsigned int wOutput = 7u;
416 constexpr
unsigned int hOutput = wOutput;
418 constexpr
unsigned int wWeights = 3u;
419 constexpr
unsigned int hWeights = wWeights;
// Shapes are declared in NCHW; SwizzleData converts when layout is NHWC.
421 TensorShape inputShape = { batches, channels, hInput, wInput };
422 TensorShape outputShape = { batches, channels, hOutput, wOutput };
423 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
426 TensorInfo outputInfo(outputShape, ArmnnType);
427 TensorInfo weightsInfo(weightsShape, ArmnnType);
428 TensorInfo biasesInfo({ channels }, ArmnnBType);
430 std::vector<float> inputData =
437 std::vector<float> weightsData =
444 std::vector<float> biasesData = { 1.f };
446 std::vector<float> expectedOutputData =
448 1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
449 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
450 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
451 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
452 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
453 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
454 7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
// Adds the bias to every expected value; presumably guarded by
// if (biasEnabled) on an elided line — confirm against upstream.
460 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
461 [&](
float f) ->
float {
return f + biasesData[0]; });
466 descriptor.m_StrideY = 2;
467 descriptor.m_BiasEnabled = biasEnabled;
468 descriptor.m_DataLayout = layout;
// Presumably only executed for NHWC layout (guard elided) — verify.
473 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
476 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// Multi-channel test case: two output channels with per-channel biases
// { -1.5f, -2.0f }; the two blocks of expected values below correspond to
// the two output channels (first block offset by -1.5, second by -2.0).
// NOTE(review): shape declarations, input/weights data literals, descriptor
// setup and most call-argument lines are elided by this extraction.
490 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
497 using namespace armnn;
507 TensorInfo outputInfo(outputShape, ArmnnType);
508 TensorInfo weightsInfo(weightsShape, ArmnnType);
509 TensorInfo biasesInfo(biasesShape, ArmnnBType);
511 std::vector<float> inputData =
517 std::vector<float> weightsData =
528 std::vector<float> biasesData = { -1.5f, -2.0f };
530 std::vector<float> expectedOutputData =
532 -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
533 5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
534 14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
535 19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
536 37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
538 0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
539 6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
540 18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
541 22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
542 40.0f, 46.0f, 108.0f, 62.0f, 70.0f
// Presumably only executed for NHWC layout (guard elided) — verify.
554 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
557 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
// Per-axis (per-channel) quantization test: uint8 1x1x2x2 input with scale
// 0.50/offset 10, int8 2x1x3x3 kernel quantized per output channel
// (scales { 0.25f, 0.5f }, quantDimension 0), int32 biases with per-channel
// scales { 0.125f, 0.25f }, producing a 1x2x5x5 uint8 output. Runs the
// workload directly rather than through TransposeConvolution2dTestImpl.
// NOTE(review): the function signature, descriptor/workload construction,
// tensor-handle copy calls and result comparison are elided; fragments
// below are kept verbatim.
577 using namespace armnn;
583 TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
584 TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);
// Kernel is quantized per-channel along dimension 0 (output channels).
586 const std::vector<float> quantScales{ 0.25f, 0.5f };
587 constexpr
unsigned int quantDimension = 0;
589 TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);
// Bias scales are inputScale * kernelScale per channel: 0.50*{0.25,0.5}.
591 const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
592 TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);
594 std::vector<uint8_t> inputData =
600 std::vector<int8_t> kernelData =
611 std::vector<int32_t> biasData = { -12, -8 };
615 std::vector<uint8_t> expectedOutputData =
619 39, 55, 131, 91, 115,
621 85, 97, 213, 127, 143,
625 46, 62, 150, 102, 126,
626 54, 66, 142, 86, 102,
627 90, 102, 226, 134, 150
643 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputInfo);
644 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputInfo);
655 queueDescriptor.m_Weight = &weightTensor;
656 queueDescriptor.m_Bias = &biasTensor;
658 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
659 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
662 inputHandle->Allocate();
663 outputHandle->Allocate();
667 ExecuteWorkload(*workload, memoryManager);
// Result-construction arguments around this line are elided.
673 outputHandle->GetShape(),
//
// Explicit template instantiations for the four test entry points, each
// over Float32, QAsymmS8, QAsymmU8 and QSymmS16 (quantized variants use
// Signed32 biases). Argument lists and, for the Simple group, the leading
// "template LayerTestResult<...>" lines are elided by this extraction.
//
682 SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
690 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
698 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
706 SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
713 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
714 PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
721 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
722 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
729 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
730 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
737 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
738 PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
745 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
746 StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
753 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
754 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
761 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
762 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
769 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
770 StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
777 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
778 MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
784 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
785 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
791 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
792 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
798 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
799 MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
bool m_BiasEnabled
Enable/disable bias.
typename ResolveTypeImpl< DT >::Type ResolveType
LayerTestResult< T, 4 > SimpleTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< T, 4 > MultiChannelTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void PermuteTensorNchwToNhwc(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
LayerTestResult< T, 4 > StridedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void SetQuantizationScale(float scale)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > TransposeConvolution2dPerAxisQuantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
const ConstTensorHandle * m_Weight
const ConstTensorHandle * m_Bias
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< T, 4 > PaddedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
unsigned int GetNumElements() const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)