#include <boost/test/unit_test.hpp>

template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
template<typename T>
void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
{
    if (data.first.GetNumElements() > data.second.size())
    {
        throw armnn::InvalidArgumentException(tensorName + ": expected " +
            std::to_string(data.first.GetNumElements()) + " but got " +
            std::to_string(data.second.size()));
    }
}
template<typename T, typename BT>
void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
                                    const TensorData<T>& input,
                                    TensorData<T>& output,
                                    const TensorData<T>& weights,
                                    const armnn::Optional<TensorData<BT>>& biases)
{
    using namespace armnn;
    VerifyInputTensorData(input, "input");
    VerifyInputTensorData(weights, "weights");

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            throw InvalidArgumentException("Bias enabled but no bias data provided");
        }
        VerifyInputTensorData(biases.value(), "biases");
    }

    // set up weights and (optionally) biases, and wire them into the queue descriptor
    ScopedCpuTensorHandle weightsTensor(weights.first);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightsTensor;

    std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
    if (descriptor.m_BiasEnabled)
    {
        biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
        queueDescriptor.m_Bias = biasesTensor.get();
    }

    // set up input and output handles
    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(input.first);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);

    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());

    // create the workload
    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.second.data());

    ExecuteWorkload(*workload, memoryManager);

    // copy the results out of the workload's output handle
    output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
}
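
// Quantizes the supplied float data according to ArmnnType / ArmnnBType, runs the
// transpose convolution workload and returns the actual and expected results.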
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    using BT = ResolveType<ArmnnBType>;

    // set fixed quantization parameters when testing a quantized data type
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float   qScale  = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        // bias scale is the product of the input and weight scales
        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input, weights, biases and output tensor data, quantized as required
    TensorData<T> input =
        { inputInfo, armnnUtils::QuantizedVector<T>(inputData,
              inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset()) };

    TensorData<T> weights =
        { weightsInfo, armnnUtils::QuantizedVector<T>(weightsData,
              weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset()) };

    TensorData<BT> biases =
        { biasesInfo, armnnUtils::QuantizedVector<BT>(biasesData,
              biasesInfo.GetQuantizationScale(), biasesInfo.GetQuantizationOffset()) };

    // output data is empty here; it is filled in by the workload
    TensorData<T> output = { outputInfo, {} };

    // execute the transpose convolution workload
    TransposeConvolution2dTestImpl(workloadFactory, memoryManager, descriptor,
                                   input, output, weights, Optional<TensorData<BT>>(biases));

    // construct the test result from the actual and expected output
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output         = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
        armnnUtils::QuantizedVector<T>(expectedOutputData,
                                       outputInfo.GetQuantizationScale(),
                                       outputInfo.GetQuantizationOffset()));

    return testResult;
}

// permutes NCHW tensor infos and data into NHWC layout
template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,   std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,  std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo, std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}
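
// Simple case: 3x3 input, 3x3 kernel, stride 1, no padding.
// Transpose convolution output size: stride * (inputSize - 1) + kernelSize - padLeft - padRight
//                                  = 1 * (3 - 1) + 3 - 0 = 5, i.e. a 5x5 output.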
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =

    std::vector<float> weightsData =

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f, 3.f, 6.f, 5.f, 3.f,
        5.f, 12.f, 21.f, 16.f, 9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
        7.f, 15.f, 24.f, 17.f, 9.f
    };

    // apply the bias to the expected output, if enabled
    if (biasEnabled)
    {
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if the test is run using the NHWC data layout
    if (layout == DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory, memoryManager, descriptor,
                                                             inputInfo, inputData,
                                                             outputInfo, expectedOutputData,
                                                             weightsInfo, weightsData,
                                                             biasesInfo, biasesData);
}
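
// Padded case: 4x4 input, 3x3 kernel, stride 1, padding of 2 on every edge.
// Output size: stride * (inputSize - 1) + kernelSize - padLeft - padRight = 1 * (4 - 1) + 3 - 4 = 2,
// i.e. a 2x2 output.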
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =

    std::vector<float> weightsData =

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =

    // apply the bias to the expected output, if enabled
    if (biasEnabled)
    {
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 2;
    descriptor.m_PadRight    = 2;
    descriptor.m_PadTop      = 2;
    descriptor.m_PadBottom   = 2;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if the test is run using the NHWC data layout
    if (layout == DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory, memoryManager, descriptor,
                                                             inputInfo, inputData,
                                                             outputInfo, expectedOutputData,
                                                             weightsInfo, weightsData,
                                                             biasesInfo, biasesData);
}
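
// Strided case: 3x3 input, 3x3 kernel, stride 2, no padding.
// Output size: stride * (inputSize - 1) + kernelSize = 2 * (3 - 1) + 3 = 7, i.e. a 7x7 output.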
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =

    std::vector<float> weightsData =

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
    };

    // apply the bias to the expected output, if enabled
    if (biasEnabled)
    {
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if the test is run using the NHWC data layout
    if (layout == DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory, memoryManager, descriptor,
                                                             inputInfo, inputData,
                                                             outputInfo, expectedOutputData,
                                                             weightsInfo, weightsData,
                                                             biasesInfo, biasesData);
}
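
// Multi-channel case: two output channels of 5x5 data, each with its own bias
// (biasesData below has two entries).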
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =

    std::vector<float> weightsData =

    std::vector<float> biasesData = { -1.5f, -2.0f };

    std::vector<float> expectedOutputData =
    {
        // channel 0
        -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
        5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
        14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
        19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

        // channel 1
        0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
        6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
        18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
        22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };

    // swizzle data if the test is run using the NHWC data layout
    if (layout == DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory, memoryManager, descriptor,
                                                             inputInfo, inputData,
                                                             outputInfo, expectedOutputData,
                                                             weightsInfo, weightsData,
                                                             biasesInfo, biasesData);
}

LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);
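
    // per-axis bias scales are inputScale * kernelScale for each output channel:
    // 0.5 * 0.25 = 0.125 and 0.5 * 0.5 = 0.25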
    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =

    std::vector<int8_t> kernelData =

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
        39, 55, 131, 91, 115,
        85, 97, 213, 127, 143,

        46, 62, 150, 102, 126,
        54, 66, 142, 86, 102,
        90, 102, 226, 134, 150

    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);

    WorkloadInfo workloadInfo;
    ScopedCpuTensorHandle weightTensor(kernelInfo);
    ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightTensor;
    queueDescriptor.m_Bias       = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload =
        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}
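
//
// Explicit template instantiations
//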
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, bool, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, const armnn::DataLayout);