// Bias values reused by the GetBias2/GetBias4/GetBias8 helpers below.
// Each vector's length matches the output-channel count of the
// convolution test cases that consume it.
static std::vector<float> Bias2({0, 2});

static std::vector<float> Bias4({1, 2, 3, 4});

static std::vector<float> Bias8({1, 2, 3, 4, 1, 2, 3, 4});
// 3-channel 8x16 NCHW input image shared by the simple convolution tests.
// Channel 0: 0.5 everywhere except an all-zero second row.
// Channel 1: a vertical line of 1s in column 2, zeros elsewhere.
// Channel 2: all -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
72 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
73 boost::multi_array<T, 1>
GetBias2(
bool biasEnabled,
float qScale)
78 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias2, qScale, 0));
83 return boost::multi_array<T, 1>();
88 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
89 boost::multi_array<T, 1>
GetBias4(
bool biasEnabled,
float qScale)
94 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias4, qScale, 0));
99 return boost::multi_array<T, 1>();
104 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
105 boost::multi_array<T, 1>
GetBias8(
bool biasEnabled,
float qScale)
109 armnn::TensorInfo biasDesc({
static_cast<unsigned int>(Bias4.size())}, ArmnnType);
110 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias8, qScale, 0));
115 return boost::multi_array<T, 1>();
120 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
125 const unsigned int outputChannels = outputInfo.
GetShape()[channelsIndex];
127 switch (outputChannels)
132 return GetBias2<ArmnnType>(biasEnabled, qScale);
136 return GetBias4<ArmnnType>(biasEnabled, qScale);
140 return GetBias8<ArmnnType>(biasEnabled, qScale);
// Compile-time mapping from a fully-connected input element type to the
// element type of its bias tensor: float inputs use float biases, while
// quantized uint8 inputs use int32 biases.
template<typename InputType>
struct FullyConnectedBiasTypeForInputType;

template<>
struct FullyConnectedBiasTypeForInputType<float>
{
    using Type = float;
};

template<>
struct FullyConnectedBiasTypeForInputType<uint8_t>
{
    using Type = int32_t;
};
167 template<
typename T,
typename B>
168 void ApplyBias(std::vector<T>& v,
float vScale, int32_t vOffset,
169 const std::vector<B>& bias,
float bScale, int32_t bOffset, uint32_t w, uint32_t h)
171 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
172 "Invalid type and parameter combination.");
173 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
174 "Invalid type and parameter combination.");
177 for (uint32_t i = 0; i < bias.size(); ++i)
180 for (uint32_t y = 0; y < h; ++y)
182 for (uint32_t x = 0; x < w; ++x)
184 uint32_t offset = (i * h + y) * w + x;
186 T& outRef = v[offset];
188 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
204 const boost::multi_array<T, 4>& originalInput,
205 const boost::multi_array<T, 4>& originalKernel,
206 const boost::multi_array<B, 1>& bias,
207 const boost::multi_array<T, 4>& originalOutputExpected,
211 uint32_t padLeft = 0,
213 uint32_t padRight = 0,
214 uint32_t padBottom = 0,
215 uint32_t strideX = 1,
216 uint32_t strideY = 1,
217 uint32_t dilationX = 1,
218 uint32_t dilationY = 1)
226 unsigned int outputHeight =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[2]);
227 unsigned int outputWidth =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[3]);
228 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[1]);
229 unsigned int outputNum =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[0]);
233 unsigned int kernelChannels =
armnn::numeric_cast<
unsigned int>(originalKernel.shape()[1]);
234 unsigned int kernelDepthMul =
armnn::numeric_cast<
unsigned int>(originalKernel.shape()[0]);
236 bool biasEnabled = bias.size() > 0;
243 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
253 armnn::TensorInfo biasDesc({
static_cast<unsigned int>(bias.size())}, ArmnnBType);
256 if(armnn::IsQuantizedType<T>())
258 inputTensorInfo.SetQuantizationScale(qScale);
259 inputTensorInfo.SetQuantizationOffset(qOffset);
271 std::vector<T> inputImage;
272 inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
273 std::vector<T> inputData;
274 inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
275 inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
281 std::vector<T> tmp(inputData.size());
282 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(T));
286 auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
288 std::vector<T> outputImage;
289 outputImage.assign(originalOutputExpected.data(),
290 originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
295 std::vector<T> biasV;
296 biasV.assign(bias.data(), bias.data() + outputChannels);
299 outputWidth, outputHeight);
303 std::vector<T> outputData;
304 outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
305 outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
310 std::vector<T> tmp(outputData.size());
314 ret.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
316 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
317 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
324 boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
336 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
337 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
340 data.
m_Bias = &biasTensor;
352 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateConvolution2d(data, info);
353 inputHandle->Allocate();
354 outputHandle->Allocate();
358 ExecuteWorkload(*workload, memoryManager);
372 const boost::multi_array<T, 4>& input,
373 const boost::multi_array<T, 4>& kernel,
374 const boost::multi_array<B, 1>& bias,
375 const boost::multi_array<O, 4>& outputExpected,
379 uint32_t padLeft = 1,
381 uint32_t padRight = 1,
382 uint32_t padBottom = 1,
383 uint32_t strideX = 1,
384 uint32_t strideY = 1)
398 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(outputExpected.shape()[3]);
402 bool biasEnabled = bias.size() > 0;
405 armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
406 armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
408 armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
409 armnn::TensorInfo biasDesc({
static_cast<unsigned int>(bias.size())}, ArmnnBType);
412 std::vector<T> inputData;
413 inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
414 auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
417 std::vector<O> outputData;
418 outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
421 ret.
outputExpected = MakeTensor<O, 4>(outputTensorInfo, outputData);
423 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
424 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
434 data.
m_Bias = &biasTensor;
445 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
446 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
448 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateConvolution2d(data, info);
449 inputHandle->Allocate();
450 outputHandle->Allocate();
454 ExecuteWorkload(*workload, memoryManager);
461 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
476 unsigned int batchSize = 1;
477 unsigned int inputChannels = 2;
478 unsigned int outputChannels = 3;
479 unsigned int inputSize = 5;
480 unsigned int kernelSize = 3;
481 unsigned int padSize = 2;
482 unsigned int stride = 1;
483 unsigned int outputSize = 7;
485 armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
486 armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
487 armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
491 if(armnn::IsQuantizedType<T>())
494 inputInfo.SetQuantizationOffset(qOffset);
495 outputInfo.SetQuantizationScale(qScale);
496 outputInfo.SetQuantizationOffset(qOffset);
497 kernelInfo.SetQuantizationScale(qScale);
498 kernelInfo.SetQuantizationOffset(qOffset);
499 biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
500 biasInfo.SetQuantizationOffset(0);
503 std::vector<T> inputData = QuantizedVector<T>(
505 5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
506 -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
508 inputInfo.GetQuantizationScale(),
509 inputInfo.GetQuantizationOffset());
511 std::vector<T> kernelData = QuantizedVector<T>(
522 kernelInfo.GetQuantizationScale(),
523 kernelInfo.GetQuantizationOffset());
525 std::vector<B> biasData =
526 QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());
528 std::vector<T> outputData = QuantizedVector<T>(
530 4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
531 -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
532 2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
534 outputInfo.GetQuantizationScale(),
535 outputInfo.GetQuantizationOffset());
540 ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
541 biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
545 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputInfo);
546 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputInfo);
556 AddInputToWorkload(data, info, inputInfo, inputHandle.get());
557 AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
560 data.
m_Bias = &biasTensor;
569 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateConvolution2d(data, info);
570 inputHandle->Allocate();
571 outputHandle->Allocate();
575 ExecuteWorkload(*workload, memoryManager);
584 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
598 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
608 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
617 const std::vector<float> outputData =
624 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
626 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
632 boost::multi_array<T, 1>(),
639 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
653 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
664 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
674 const std::vector<T> outputData =
681 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
683 uint32_t padLeft = 1;
685 uint32_t padRight = 1;
686 uint32_t padBottom = 1;
687 uint32_t strideX = 2;
688 uint32_t strideY = 2;
690 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
696 boost::multi_array<T, 1>(),
709 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
721 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
725 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
768 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
770 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
771 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
772 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
773 -23.5f, -23.5f, -23.5f,
774 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
775 -23.5f, -23.5f, -23.5f,
777 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
778 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
779 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
780 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
784 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
790 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
812 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
816 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
847 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
849 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
850 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
851 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
852 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
853 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
854 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
856 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
857 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
858 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
859 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
860 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
861 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
865 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
871 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
890 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
900 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
917 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
920 -242, -594, -934, -372, 0, 0,
921 -495, -1190, -1850, -725, 0, 0,
922 -538, -1256, -1916, -748, 0, 0,
923 -273, -626, -946, -363, 0, 0,
930 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
936 GetBias2<ArmnnBType>(
false, qScale * qScale),
959 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
966 }, qScale, qOffset)));
970 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
981 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
982 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
984 -7140, -10580, -13940, -9300, -5230,
985 -9590, -14120, -18520, -12290, -6860,
986 -9980, -14560, -18960, -12560, -7000,
987 -7518, -10904, -14144, -9318, -5152,
988 -5032, -7256, -9376, -6142, -3368,
992 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
998 GetBias2<ArmnnBType>(
false, qScale * qScale),
1009 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
1014 const std::vector<float>& inputNoQuantizedValues,
1016 const std::vector<float>& kernelNoQuantizedValues,
1018 const std::vector<float>& outputExpectedNoQuantizedValues,
1023 uint32_t padLeft = 0,
1024 uint32_t padTop = 0,
1025 uint32_t padRight = 0,
1026 uint32_t padBottom = 0,
1027 uint32_t strideX = 1,
1028 uint32_t strideY = 1,
1029 bool biasEnabled =
false 1065 auto input = MakeTensor<T, 4>(inputTensorInfo,
1066 std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
1067 inputTensorInfo.GetQuantizationScale(),
1068 inputTensorInfo.GetQuantizationOffset())));
1069 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1070 std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
1071 kernelTensorInfo.GetQuantizationScale(),
1072 kernelTensorInfo.GetQuantizationOffset())));
1073 auto expectedOutput =
1074 MakeTensor<T, 4>(outputTensorInfo,
1075 std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
1076 outputTensorInfo.GetQuantizationScale(),
1077 outputTensorInfo.GetQuantizationOffset())));
1079 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1082 tensorHandleFactory,
1085 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1100 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
1109 std::vector<float> inputNoQuantizedValues =
1111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1114 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1115 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1116 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1117 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1120 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1124 std::vector<float> kernelNoQuantizedValues =
1134 std::vector<float> outputExpectedNoQuantizedValues =
1142 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1145 tensorHandleFactory,
1146 inputNoQuantizedValues,
1148 kernelNoQuantizedValues,
1150 outputExpectedNoQuantizedValues,
1158 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
1167 std::vector<float> inputNoQuantizedValues =
1169 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1170 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1172 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1173 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1174 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1175 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1176 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1177 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1178 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1180 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1181 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1182 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1183 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1184 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1185 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1186 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1187 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1188 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1193 std::vector<float> kernelNoQuantizedValues =
1207 std::vector<float> outputExpectedNoQuantizedValues =
1215 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1218 tensorHandleFactory,
1219 inputNoQuantizedValues,
1221 kernelNoQuantizedValues,
1223 outputExpectedNoQuantizedValues,
1231 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
1240 std::vector<float> inputNoQuantizedValues =
1242 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1243 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1244 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1245 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1246 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1247 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1248 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1249 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1250 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1251 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
1255 std::vector<float> kernelNoQuantizedValues =
1265 std::vector<float> outputExpectedNoQuantizedValues =
1272 uint32_t padLeft = 1;
1273 uint32_t padTop = 1;
1274 uint32_t padRight = 1;
1275 uint32_t padBottom = 1;
1277 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1280 tensorHandleFactory,
1281 inputNoQuantizedValues,
1283 kernelNoQuantizedValues,
1285 outputExpectedNoQuantizedValues,
1300 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1308 unsigned int inputHeight = 8;
1309 unsigned int inputWidth = 16;
1310 unsigned int inputChannels = 3;
1311 unsigned int inputNum = 5;
1313 unsigned int kernelHeight = 3;
1314 unsigned int kernelWidth = 3;
1316 unsigned int strideX = 2;
1317 unsigned int strideY = 3;
1318 unsigned int padX = 1;
1319 unsigned int padY = 1;
1321 unsigned int outputNum = inputNum;
1322 unsigned int outputChannels = 2;
1323 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
1324 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
1331 unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
1332 unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
1333 unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
1334 unsigned int biasShape[] = {outputChannels};
1343 auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908);
1344 auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234);
1345 auto bias = MakeRandomTensor<T, 1>(biasDesc, 1028);
1347 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1348 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1358 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1359 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1361 data.
m_Bias = &biasTensor;
1370 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1371 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1375 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1376 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1378 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateConvolution2d(data, info);
1379 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreateConvolution2d(refData, refInfo);
1381 outputHandleRef->Allocate();
1382 inputHandleRef->Allocate();
1384 inputHandle->Allocate();
1385 outputHandle->Allocate();
1390 ExecuteWorkload(*workload, memoryManager);
1392 workloadRef->PostAllocationConfigure();
1393 workloadRef->Execute();
1414 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1444 auto input = MakeTensor<armnn::BFloat16, 4>(inputDesc, inputValues);
1449 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1463 auto kernel = MakeTensor<armnn::BFloat16, 4>(kernelDesc, kernelValues);
1469 const std::vector<float> outputData =
1482 boost::multi_array<float, 4> expectedOutput = MakeTensor<float, 4>(outputDesc, outputData);
1484 uint32_t padLeft = 1;
1485 uint32_t padTop = 1;
1486 uint32_t padRight = 1;
1487 uint32_t padBottom = 1;
1488 uint32_t strideX = 2;
1489 uint32_t strideY = 2;
1495 tensorHandleFactory,
1498 boost::multi_array<float, 1>(),
1524 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1554 auto input = MakeTensor<armnn::BFloat16, 4>(inputDesc, inputValues);
1559 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1573 auto kernel = MakeTensor<armnn::BFloat16, 4>(kernelDesc, kernelValues);
1579 const std::vector<float> outputData =
1592 boost::multi_array<float, 4> expectedOutput = MakeTensor<float, 4>(outputDesc, outputData);
1594 uint32_t padLeft = 1;
1595 uint32_t padTop = 1;
1596 uint32_t padRight = 1;
1597 uint32_t padBottom = 1;
1598 uint32_t strideX = 2;
1599 uint32_t strideY = 2;
1605 tensorHandleFactory,
1608 boost::multi_array<float, 1>(),
1631 const boost::multi_array<T, 4>& input,
1632 const boost::multi_array<T, 4>& kernel,
1633 const boost::multi_array<B, 1>& bias,
1634 const boost::multi_array<T, 4>& outputExpected,
1638 uint32_t padLeft = 0,
1639 uint32_t padTop = 0,
1640 uint32_t padRight = 0,
1641 uint32_t padBottom = 0,
1642 uint32_t strideX = 1,
1643 uint32_t strideY = 1)
1654 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(outputExpected.shape()[1]);
1659 bool biasEnabled = bias.size() > 0;
1660 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
1667 armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
1668 armnn::TensorInfo biasDesc({
static_cast<unsigned int>(bias.size())}, ArmnnBType);
1671 if (armnn::IsQuantizedType<T>())
1673 inputTensorInfo.SetQuantizationScale(qScale);
1674 inputTensorInfo.SetQuantizationOffset(qOffset);
1677 kernelDesc.SetQuantizationScale(qScale);
1678 kernelDesc.SetQuantizationOffset(qOffset);
1679 biasDesc.SetQuantizationScale(qScale*qScale);
1680 biasDesc.SetQuantizationOffset(0);
1684 std::vector<T> inputData;
1685 inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);
1691 std::vector<T> tmp(inputData.size());
1692 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(T));
1696 auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
1699 std::vector<T> outputData;
1700 outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
1703 std::vector<T> biasV;
1704 biasV.assign(bias.data(), bias.data() + outputChannels);
1706 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1707 outputWidth, outputHeight);
1715 std::vector<T> tmp(outputData.size());
1720 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
1722 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1723 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1737 data.
m_Bias = &biasTensor;
1748 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1749 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1752 inputHandle->Allocate();
1753 outputHandle->Allocate();
1757 ExecuteWorkload(*workload, memoryManager);
1764 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
1776 unsigned int inputHeight = 3;
1777 unsigned int inputWidth = 3;
1778 unsigned int inputChannels = 2;
1779 unsigned int inputNum = 1;
1781 unsigned int kernelHeight = 3;
1782 unsigned int kernelWidth = 3;
1783 unsigned int kernelChannels = inputChannels;
1784 unsigned int kernelDepthMultiplier = 1;
1786 unsigned int outputHeight = 1;
1787 unsigned int outputWidth = 1;
1788 unsigned int outputChannels = kernelChannels;
1789 unsigned int outputNum = inputNum;
1795 armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth},
1800 if(armnn::IsQuantizedType<T>())
1806 kernelDesc.SetQuantizationScale(qScale);
1807 kernelDesc.SetQuantizationOffset(qOffset);
1808 biasDesc.SetQuantizationScale(qScale*qScale);
1809 biasDesc.SetQuantizationOffset(0);
1811 std::vector<T> inputData = std::vector<T>(
1812 QuantizedVector<T>({
1828 std::vector<T> tmp(inputData.size());
1832 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
1834 std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
1835 biasDesc.GetQuantizationScale(),
1836 biasDesc.GetQuantizationOffset()));
1838 auto bias = MakeTensor<B, 1>(biasDesc, biasV);
1840 std::vector<T> kernelData = std::vector<T>(
1841 QuantizedVector<T>({
1850 kernelDesc.GetQuantizationScale(),
1851 kernelDesc.GetQuantizationOffset()));
1853 auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
1856 std::vector<T> outputImage(
1857 QuantizedVector<T>({ 0.f, 0.f },
1866 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1867 outputWidth, outputHeight);
1873 std::vector<T> tmp(outputImage.size());
1878 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
1880 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1881 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1891 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1892 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1895 data.
m_Bias = &biasTensor;
1906 inputHandle->Allocate();
1907 outputHandle->Allocate();
1911 ExecuteWorkload(*workload, memoryManager);
1918 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
1930 unsigned int depthMultiplier = 2;
1932 unsigned int inputHeight = 8;
1933 unsigned int inputWidth = 16;
1934 unsigned int inputChannels = 2;
1935 unsigned int inputBatchSize = 1;
1937 unsigned int kernelHeight = 5;
1938 unsigned int kernelWidth = 3;
1940 unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
1941 unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
1942 unsigned int outputChannels = inputChannels * depthMultiplier;
1943 unsigned int outputBatchSize = inputBatchSize;
1946 inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1948 outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
1949 armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth},
1954 if(armnn::IsQuantizedType<T>())
1960 kernelDesc.SetQuantizationScale(qScale);
1961 kernelDesc.SetQuantizationOffset(qOffset);
1962 biasDesc.SetQuantizationScale(qScale*qScale);
1963 biasDesc.SetQuantizationOffset(0);
1967 std::vector<T> originalInputData = std::vector<T>(
1968 QuantizedVector<T>({
1969 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1970 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1971 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1972 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1973 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1974 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1975 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1976 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1977 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1978 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1979 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1980 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1981 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1982 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1983 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1984 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
1989 std::vector<T> inputData = originalInputData;
1995 originalInputData.data(), inputData.data(),
sizeof(T));
1997 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
1999 std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
2000 biasDesc.GetQuantizationScale(),
2001 biasDesc.GetQuantizationOffset());
2003 auto bias = MakeTensor<B, 1>(biasDesc, biasV);
2005 std::vector<T> kernelData = std::vector<T>(
2006 QuantizedVector<T>({
2031 kernelDesc.GetQuantizationScale(),
2032 kernelDesc.GetQuantizationOffset()));
2034 auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
2037 std::vector<T> originalOutputImage = std::vector<T>(
2038 QuantizedVector<T>({
2039 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f,
2040 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
2041 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
2042 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f,
2043 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f,
2044 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
2046 -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
2047 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2048 -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
2049 -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
2050 -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
2051 -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
2053 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2054 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2055 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2056 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2057 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2058 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2060 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2061 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2062 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2063 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2064 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2065 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
2077 biasDesc.GetQuantizationScale(),
2078 biasDesc.GetQuantizationOffset(),
2084 std::vector<T> outputImage = originalOutputImage;
2088 originalOutputImage.data(), outputImage.data(),
sizeof(T));
2091 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
2093 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
2094 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
2104 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2105 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2108 data.
m_Bias = &biasTensor;
2119 inputHandle->Allocate();
2120 outputHandle->Allocate();
2124 ExecuteWorkload(*workload, memoryManager);
2137 const boost::multi_array<T, 4>& originalInput,
2138 const boost::multi_array<T, 4>& originalKernel,
2139 const boost::multi_array<B, 1>& bias,
2140 const boost::multi_array<T, 4>& originalOutputExpected,
2144 uint32_t padLeft = 0,
2145 uint32_t padTop = 0,
2146 uint32_t padRight = 0,
2147 uint32_t padBottom = 0,
2148 uint32_t strideX = 1,
2149 uint32_t strideY = 1,
2150 uint32_t dilationX = 1,
2151 uint32_t dilationY = 1)
2158 unsigned int outputHeight =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[2]);
2159 unsigned int outputWidth =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[3]);
2160 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[1]);
2161 unsigned int outputNum =
armnn::numeric_cast<
unsigned int>(originalOutputExpected.shape()[0]);
2165 unsigned int kernelChannels =
armnn::numeric_cast<
unsigned int>(originalKernel.shape()[1]);
2166 unsigned int kernelDepthMul =
armnn::numeric_cast<
unsigned int>(originalKernel.shape()[0]);
2168 bool biasEnabled = bias.size() > 0;
2175 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
2185 armnn::TensorInfo kernelDesc({kernelDepthMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
2187 armnn::TensorInfo biasDesc({
static_cast<unsigned int>(bias.size())}, ArmnnBType);
2190 if(armnn::IsQuantizedType<T>())
2192 inputTensorInfo.SetQuantizationScale(qScale);
2193 inputTensorInfo.SetQuantizationOffset(qOffset);
2196 kernelDesc.SetQuantizationScale(qScale);
2197 kernelDesc.SetQuantizationOffset(qOffset);
2198 biasDesc.SetQuantizationScale(qScale*qScale);
2199 biasDesc.SetQuantizationOffset(0);
2205 std::vector<T> input;
2206 input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
2207 std::vector<T> inputData;
2208 inputData.insert(inputData.end(), input.begin(), input.end());
2209 inputData.insert(inputData.end(), input.begin(), input.end());
2215 std::vector<T> tmp(inputData.size());
2216 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(T));
2220 auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
2222 std::vector<T> output;
2223 output.assign(originalOutputExpected.data(),
2224 originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
2229 std::vector<T> biasV;
2230 biasV.assign(bias.data(), bias.data() + outputChannels);
2232 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
2233 outputWidth, outputHeight);
2237 std::vector<T> outputData;
2238 outputData.insert(outputData.end(), output.begin(), output.end());
2239 outputData.insert(outputData.end(), output.begin(), output.end());
2244 std::vector<T> tmp(outputData.size());
2248 ret.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
2250 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
2251 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
2258 boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
2266 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2267 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2270 data.
m_Bias = &biasTensor;
2283 inputHandle->Allocate();
2284 outputHandle->Allocate();
2288 ExecuteWorkload(*workload, memoryManager);
2308 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
2309 QuantizedVector<T>({
2322 inputTensorInfo.GetQuantizationScale(),
2323 inputTensorInfo.GetQuantizationOffset())));
2327 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
2328 QuantizedVector<T>({
2339 kernelTensorInfo.GetQuantizationScale(),
2340 kernelTensorInfo.GetQuantizationOffset())));
2345 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
2346 QuantizedVector<T>({
2347 1062, 1580, 1850, 1530, 1117,
2348 2140, 3108, 3500, 2842, 2042,
2349 3580, 5068, 5460, 4342, 3062,
2350 3618, 5072, 5390, 4248, 2971,
2351 3074, 4282, 4510, 3533, 2457,
2353 1550, 2284, 2362, 1955, 1428,
2354 2910, 4206, 4342, 3528, 2536,
2355 3390, 4886, 5022, 4068, 2916,
2356 3566, 5056, 5182, 4133, 2922,
2357 3100, 4352, 4452, 3517, 2465
2359 outputTensorInfo.GetQuantizationScale(),
2360 outputTensorInfo.GetQuantizationOffset())));
2362 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
2365 tensorHandleFactory,
2368 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2394 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
2395 QuantizedVector<T>({
2408 inputTensorInfo.GetQuantizationScale(),
2409 inputTensorInfo.GetQuantizationOffset())));
2412 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
2413 QuantizedVector<T>({
2424 kernelTensorInfo.GetQuantizationScale(),
2425 kernelTensorInfo.GetQuantizationOffset())));
2428 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
2429 QuantizedVector<T>({
2430 1062, 1580, 1850, 1530, 1117,
2431 2140, 3108, 3500, 2842, 2042,
2432 3580, 5068, 5460, 4342, 3062,
2433 3618, 5072, 5390, 4248, 2971,
2434 3074, 4282, 4510, 3533, 2457,
2436 1550, 2284, 2362, 1955, 1428,
2437 2910, 4206, 4342, 3528, 2536,
2438 3390, 4886, 5022, 4068, 2916,
2439 3566, 5056, 5182, 4133, 2922,
2440 3100, 4352, 4452, 3517, 2465
2442 outputTensorInfo.GetQuantizationScale(),
2443 outputTensorInfo.GetQuantizationOffset())));
2445 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2448 tensorHandleFactory,
2451 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2477 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
2478 QuantizedVector<T>({
2479 0, 0, 0, 0, 0, 0, 0, 0, 0,
2480 0, 0, 0, 0, 0, 0, 0, 0, 0,
2481 0, 0, 0, 0, 0, 0, 0, 0, 0,
2482 0, 0, 0, 1, 1, 1, 0, 0, 0,
2483 0, 0, 0, 1, 1, 1, 0, 0, 0,
2484 0, 0, 0, 1, 1, 1, 0, 0, 0,
2485 0, 0, 0, 0, 0, 0, 0, 0, 0,
2486 0, 0, 0, 0, 0, 0, 0, 0, 0,
2487 0, 0, 0, 0, 0, 0, 0, 0, 0
2489 inputTensorInfo.GetQuantizationScale(),
2490 inputTensorInfo.GetQuantizationOffset())));
2493 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
2494 QuantizedVector<T>({
2499 kernelTensorInfo.GetQuantizationScale(),
2500 kernelTensorInfo.GetQuantizationOffset())));
2502 uint32_t padLeft = 0;
2503 uint32_t padTop = 0;
2504 uint32_t padRight = 0;
2505 uint32_t padBottom = 0;
2506 uint32_t strideX = 1;
2507 uint32_t strideY = 1;
2508 uint32_t dilationX = 3;
2509 uint32_t dilationY = 3;
2513 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
2514 QuantizedVector<T>({
2519 outputTensorInfo.GetQuantizationScale(),
2520 outputTensorInfo.GetQuantizationOffset())));
2522 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2525 tensorHandleFactory,
2528 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2543 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T = armnn::ResolveType<ArmnnType>>
2548 const std::vector<float>& inputNoQuantizedValues,
2550 const std::vector<float>& kernelNoQuantizedValues,
2552 const std::vector<float>& outputExpectedNoQuantizedValues,
2557 bool biasEnabled =
false)
2592 auto input = MakeTensor<T, 4>(inputTensorInfo,
2593 std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
2594 inputTensorInfo.GetQuantizationScale(),
2595 inputTensorInfo.GetQuantizationOffset())));
2596 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
2597 std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
2598 kernelTensorInfo.GetQuantizationScale(),
2599 kernelTensorInfo.GetQuantizationOffset())));
2600 auto expectedOutput =
2601 MakeTensor<T, 4>(outputTensorInfo,
2602 std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
2603 outputTensorInfo.GetQuantizationScale(),
2604 outputTensorInfo.GetQuantizationOffset())));
2606 uint32_t padLeft = 0;
2607 uint32_t padTop = 0;
2608 uint32_t padRight = 0;
2609 uint32_t padBottom = 0;
2610 uint32_t strideX = 1;
2611 uint32_t strideY = 1;
2613 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2616 tensorHandleFactory,
2619 GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
2634 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
2643 std::vector<float> inputNoQuantizedValues =
2645 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2646 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2647 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2648 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2649 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2650 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2651 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2652 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2653 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2654 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2658 std::vector<float> kernelNoQuantizedValues =
2668 std::vector<float> outputExpectedNoQuantizedValues =
2676 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2679 tensorHandleFactory,
2680 inputNoQuantizedValues,
2682 kernelNoQuantizedValues,
2684 outputExpectedNoQuantizedValues,
2692 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
2701 std::vector<float> inputNoQuantizedValues =
2703 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2704 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2705 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2706 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2707 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2708 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2709 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2710 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2711 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2712 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2714 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2715 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2716 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2717 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2718 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2719 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2720 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2721 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2722 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2723 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2727 std::vector<float> kernelNoQuantizedValues =
2741 std::vector<float> outputExpectedNoQuantizedValues =
2754 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2757 tensorHandleFactory,
2758 inputNoQuantizedValues,
2760 kernelNoQuantizedValues,
2762 outputExpectedNoQuantizedValues,
2770 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
2779 std::vector<float> inputNoQuantizedValues =
2792 std::vector<float> kernelNoQuantizedValues =
2820 std::vector<float> outputExpectedNoQuantizedValues =
2848 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2851 tensorHandleFactory,
2852 inputNoQuantizedValues,
2854 kernelNoQuantizedValues,
2856 outputExpectedNoQuantizedValues,
2864 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
typename T>
2873 std::vector<float> inputNoQuantizedValues =
2886 std::vector<float> kernelNoQuantizedValues =
2903 std::vector<float> outputExpectedNoQuantizedValues =
2919 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2922 tensorHandleFactory,
2923 inputNoQuantizedValues,
2925 kernelNoQuantizedValues,
2927 outputExpectedNoQuantizedValues,
2935 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
2944 unsigned int inputHeight = 8;
2945 unsigned int inputWidth = 16;
2946 unsigned int inputChannels = 3;
2947 unsigned int inputNum = 5;
2949 unsigned int kernelHeight = 3;
2950 unsigned int kernelWidth = 3;
2951 unsigned int channelMultiplier = 1;
2953 unsigned int strideX = 2;
2954 unsigned int strideY = 3;
2955 unsigned int padX = 1;
2956 unsigned int padY = 1;
2958 unsigned int outputNum = inputNum;
2959 unsigned int outputChannels = inputChannels * channelMultiplier;
2960 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
2961 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
2969 std::vector<unsigned int> inputShape;
2970 std::vector<unsigned int> outputShape;
2971 std::vector<unsigned int> kernelShape{ channelMultiplier, inputChannels, kernelHeight, kernelWidth };
2972 std::vector<unsigned int> biasShape{ outputChannels };
2976 inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
2977 outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
2980 inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
2981 outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
2985 + std::to_string(static_cast<int>(layout.
GetDataLayout())) +
"]");
2988 float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
2989 float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
2990 int32_t qOffset = 0;
2992 inputTensorInfo =
armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
2993 outputTensorInfo =
armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
2994 kernelDesc =
armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
3000 auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908, 0.0f, 255.0f);
3001 auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f);
3002 auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(
3003 biasDesc, 1028, 0.0f, 255.0f);
3005 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
3006 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
3016 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
3017 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3019 data.
m_Bias = &biasTensor;
3029 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
3030 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
3034 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
3035 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3040 outputHandleRef->Allocate();
3041 inputHandleRef->Allocate();
3043 inputHandle->Allocate();
3044 outputHandle->Allocate();
3049 ExecuteWorkload(*workload, memoryManager);
3051 workloadRef->PostAllocationConfigure();
3052 workloadRef->Execute();
3064 Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3072 Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3073 armnn::IWorkloadFactory&,
3074 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3075 const armnn::ITensorHandleFactory&,
3080 Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3081 armnn::IWorkloadFactory&,
3082 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3083 const armnn::ITensorHandleFactory&,
3088 Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3089 armnn::IWorkloadFactory&,
3090 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3091 const armnn::ITensorHandleFactory&,
3096 Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3097 armnn::IWorkloadFactory&,
3098 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3099 const armnn::ITensorHandleFactory&,
3103 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3104 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3105 armnn::IWorkloadFactory&,
3106 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3107 const armnn::ITensorHandleFactory&,
3112 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3113 armnn::IWorkloadFactory&,
3114 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3115 const armnn::ITensorHandleFactory&,
3119 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3120 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3121 armnn::IWorkloadFactory&,
3122 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3123 const armnn::ITensorHandleFactory&,
3127 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3128 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3129 armnn::IWorkloadFactory&,
3130 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3131 const armnn::ITensorHandleFactory&,
3135 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3136 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3137 armnn::IWorkloadFactory&,
3138 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3139 const armnn::ITensorHandleFactory&,
3143 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3144 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3145 armnn::IWorkloadFactory &workloadFactory,
3146 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3147 const armnn::ITensorHandleFactory& tensorHandleFactory,
3151 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3152 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3153 armnn::IWorkloadFactory &workloadFactory,
3154 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3155 const armnn::ITensorHandleFactory& tensorHandleFactory,
3159 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3160 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3161 armnn::IWorkloadFactory &workloadFactory,
3162 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3163 const armnn::ITensorHandleFactory& tensorHandleFactory,
3167 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3168 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3169 armnn::IWorkloadFactory &workloadFactory,
3170 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3171 const armnn::ITensorHandleFactory& tensorHandleFactory,
3175 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3176 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3177 armnn::IWorkloadFactory &workloadFactory,
3178 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3179 const armnn::ITensorHandleFactory& tensorHandleFactory,
3183 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3184 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3185 armnn::IWorkloadFactory&,
3186 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3187 const armnn::ITensorHandleFactory&,
3191 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3192 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3193 armnn::IWorkloadFactory&,
3194 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3195 const armnn::ITensorHandleFactory&,
3199 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3200 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3201 armnn::IWorkloadFactory&,
3202 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3203 const armnn::ITensorHandleFactory&,
3207 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3208 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3209 armnn::IWorkloadFactory&,
3210 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3211 const armnn::ITensorHandleFactory&,
3215 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3216 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3217 armnn::IWorkloadFactory&,
3218 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3219 const armnn::ITensorHandleFactory&,
3223 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3224 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3225 armnn::IWorkloadFactory&,
3226 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3227 const armnn::ITensorHandleFactory&,
3231 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3232 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3233 armnn::IWorkloadFactory&,
3234 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3235 const armnn::ITensorHandleFactory&,
3239 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3240 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3241 armnn::IWorkloadFactory&,
3242 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3243 const armnn::ITensorHandleFactory&,
3247 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3248 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3249 armnn::IWorkloadFactory&,
3250 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3251 const armnn::ITensorHandleFactory&,
3255 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3256 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3257 armnn::IWorkloadFactory&,
3258 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3259 const armnn::ITensorHandleFactory&,
3263 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3264 DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3265 armnn::IWorkloadFactory &workloadFactory,
3266 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3267 const armnn::ITensorHandleFactory& tensorHandleFactory,
3271 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3272 DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3273 armnn::IWorkloadFactory &workloadFactory,
3274 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3275 const armnn::ITensorHandleFactory& tensorHandleFactory,
3279 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3280 DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3281 armnn::IWorkloadFactory &workloadFactory,
3282 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3283 const armnn::ITensorHandleFactory& tensorHandleFactory,
3287 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3288 DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3289 armnn::IWorkloadFactory &workloadFactory,
3290 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3291 const armnn::ITensorHandleFactory& tensorHandleFactory,
3300 armnn::IWorkloadFactory& workloadFactory,
3301 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3302 const armnn::ITensorHandleFactory& tensorHandleFactory,
3306 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3307 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
3311 armnn::IWorkloadFactory& workloadFactory,
3312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3313 const armnn::ITensorHandleFactory& tensorHandleFactory,
3317 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3318 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3322 armnn::IWorkloadFactory& workloadFactory,
3323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3324 const armnn::ITensorHandleFactory& tensorHandleFactory,
3328 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3329 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
3333 armnn::IWorkloadFactory& workloadFactory,
3334 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3335 const armnn::ITensorHandleFactory& tensorHandleFactory,
3338 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3341 tensorHandleFactory,
3349 armnn::IWorkloadFactory& workloadFactory,
3350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3351 const armnn::ITensorHandleFactory& tensorHandleFactory,
3355 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3358 tensorHandleFactory,
3366 armnn::IWorkloadFactory& workloadFactory,
3367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3368 const armnn::ITensorHandleFactory& tensorHandleFactory,
3372 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3373 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3377 armnn::IWorkloadFactory& workloadFactory,
3378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3379 const armnn::ITensorHandleFactory& tensorHandleFactory,
3383 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3384 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3388 armnn::IWorkloadFactory& workloadFactory,
3389 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3390 const armnn::ITensorHandleFactory& tensorHandleFactory,
3394 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3395 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3399 armnn::IWorkloadFactory& workloadFactory,
3400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3401 const armnn::ITensorHandleFactory& tensorHandleFactory,
3404 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3405 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
3409 armnn::IWorkloadFactory& workloadFactory,
3410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3411 const armnn::ITensorHandleFactory& tensorHandleFactory,
3416 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
3420 armnn::IWorkloadFactory& workloadFactory,
3421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3422 const armnn::ITensorHandleFactory& tensorHandleFactory,
3425 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3426 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
3430 armnn::IWorkloadFactory& workloadFactory,
3431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3432 const armnn::ITensorHandleFactory& tensorHandleFactory,
3435 return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3436 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
3440 armnn::IWorkloadFactory& workloadFactory,
3441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3442 const armnn::ITensorHandleFactory& tensorHandleFactory,
3445 using namespace armnn;
3447 const DataType inputType = DataType::QAsymmU8;
3448 const DataType kernelType = DataType::QSymmS8;
3449 const DataType biasType = DataType::Signed32;
3451 TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
3452 TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);
3454 const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
3455 constexpr
unsigned int quantDimension = 0;
3457 TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
3459 const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
3460 TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
3462 std::vector<uint8_t> inputData =
3464 138, 108, 138, 108, 138, 108
3467 std::vector<int8_t> kernelData =
3472 std::vector<int32_t> biasData =
3477 std::vector<uint8_t> expectedOutputData =
3479 121, 118, 115, 121, 118, 115, 121, 118, 115
3500 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputInfo);
3501 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputInfo);
3513 queueDescriptor.m_Weight = &weightTensor;
3514 queueDescriptor.m_Bias = &biasTensor;
3516 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3517 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3519 std::unique_ptr<IWorkload> workload = workloadFactory.
CreateConvolution2d(queueDescriptor, workloadInfo);
3520 inputHandle->Allocate();
3521 outputHandle->Allocate();
3525 ExecuteWorkload(*workload, memoryManager);
3529 ret.
outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
3535 armnn::IWorkloadFactory& workloadFactory,
3536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3537 armnn::IWorkloadFactory& refWorkloadFactory,
3538 const armnn::ITensorHandleFactory& tensorHandleFactory,
3539 const armnn::ITensorHandleFactory& refTensorHandleFactory)
3541 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
3542 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
3546 armnn::IWorkloadFactory& workloadFactory,
3547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3548 const armnn::ITensorHandleFactory& tensorHandleFactory,
3552 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3553 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3557 armnn::IWorkloadFactory& workloadFactory,
3558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3559 const armnn::ITensorHandleFactory& tensorHandleFactory,
3562 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3563 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
3567 armnn::IWorkloadFactory& workloadFactory,
3568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3569 const armnn::ITensorHandleFactory& tensorHandleFactory,
3573 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3574 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3578 armnn::IWorkloadFactory& workloadFactory,
3579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3580 const armnn::ITensorHandleFactory& tensorHandleFactory)
3583 auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
3585 std::vector<float> kernelData;
3586 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
3587 for (
unsigned int i = 0; i < 64; ++i)
3589 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
3592 auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
3594 std::vector<float> expectedOutputData(64, 0.f);
3596 auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
3598 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3601 tensorHandleFactory,
3604 boost::multi_array<float, 1>(),
3612 armnn::IWorkloadFactory& workloadFactory,
3613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3614 const armnn::ITensorHandleFactory& tensorHandleFactory,
3618 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3619 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3623 armnn::IWorkloadFactory& workloadFactory,
3624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3625 const armnn::ITensorHandleFactory& tensorHandleFactory,
3629 return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3630 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3634 armnn::IWorkloadFactory& workloadFactory,
3635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3636 const armnn::ITensorHandleFactory& tensorHandleFactory,
3640 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3641 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3645 armnn::IWorkloadFactory& workloadFactory,
3646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3647 const armnn::ITensorHandleFactory& tensorHandleFactory)
3649 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3652 tensorHandleFactory,
3659 armnn::IWorkloadFactory& workloadFactory,
3660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3661 const armnn::ITensorHandleFactory& tensorHandleFactory,
3665 return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3666 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3670 armnn::IWorkloadFactory& workloadFactory,
3671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3672 const armnn::ITensorHandleFactory& tensorHandleFactory,
3676 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3677 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3681 armnn::IWorkloadFactory& workloadFactory,
3682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3683 const armnn::ITensorHandleFactory& tensorHandleFactory,
3686 using namespace armnn;
3688 const DataType inputType = DataType::QAsymmU8;
3689 const DataType kernelType = DataType::QSymmS8;
3690 const DataType biasType = DataType::Signed32;
3692 TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128);
3693 TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128);
3695 const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
3696 const unsigned int quantDimension = 0;
3697 TensorInfo kernelInfo({ 2, 2, 2, 2 }, kernelType, quantScales, quantDimension);
3699 const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3700 constexpr
unsigned int biasQuantDimension = 0;
3701 TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
3703 std::vector<uint8_t> inputData =
3716 std::vector<int8_t> kernelData =
3724 std::vector<int32_t> biasData =
3729 std::vector<uint8_t> expectedOutputData =
3755 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputInfo);
3756 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputInfo);
3767 queueDescriptor.m_Weight = &weightTensor;
3768 queueDescriptor.m_Bias = &biasTensor;
3770 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3771 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3774 inputHandle->Allocate();
3775 outputHandle->Allocate();
3779 ExecuteWorkload(*workload, memoryManager);
3784 ret.
outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
3790 armnn::IWorkloadFactory& workloadFactory,
3791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3792 armnn::IWorkloadFactory& refWorkloadFactory,
3793 const armnn::ITensorHandleFactory& tensorHandleFactory,
3794 const armnn::ITensorHandleFactory& refTensorHandleFactory,
3797 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
3798 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
3802 armnn::IWorkloadFactory& workloadFactory,
3803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3804 armnn::IWorkloadFactory& refWorkloadFactory,
3805 const armnn::ITensorHandleFactory& tensorHandleFactory,
3806 const armnn::ITensorHandleFactory& refTensorHandleFactory,
3809 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
3810 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
LayerTestResult< T, 4 > DepthwiseConvolution2dMult2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< float, 4 > Convolution2d3x3Stride2x2BFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout &dataLayout)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
const ConstCpuTensorHandle * m_Bias
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
bool m_BiasEnabled
Enable/disable bias.
LayerTestResult< int16_t, 4 > SimpleConvolution2d3x5QSymm16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< T, 4 > SimpleConvolution2d3x3Stride2x2TestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout &dataLayout)
LayerTestResult< T, 4 > Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout, float qScale, int32_t qOffset)
const TensorShape & GetShape() const
uint32_t m_PadBottom
Padding bottom value in the height dimension.
LayerTestResult< float, 4 > Convolution2d3x3Stride2x2BFloat16SmallValueTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout &dataLayout)
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
LayerTestResult< T, 4 > Convolution2d2x3x3Dilation3x3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< uint8_t, 4 > SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
const ConstCpuTensorHandle * m_Bias
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
boost::multi_array< T, n > outputExpected
LayerTestResult< T, 4 > CompareConvolution2dTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
LayerTestResult< T, 4 > Convolution2d3x3DilationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< float > &inputNoQuantizedValues, armnn::TensorInfo &inputTensorInfo, const std::vector< float > &kernelNoQuantizedValues, armnn::TensorInfo &kernelTensorInfo, const std::vector< float > &outputExpectedNoQuantizedValues, armnn::TensorInfo &outputTensorInfo, uint32_t dilationX, uint32_t dilationY, armnn::DataLayout layout=armnn::DataLayout::NCHW, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1, bool biasEnabled=false)
LayerTestResult< float, 4 > SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
LayerTestResult< T, 4 > DepthwiseConvolution2dDepthMul1TestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
typename ResolveTypeImpl< DT >::Type ResolveType
LayerTestResult< T, 4 > SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, armnn::DataLayout dataLayout)
LayerTestResult< T, 4 > DepthwiseConvolution2dTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< uint8_t, 4 > Convolution1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
LayerTestResult< T, 4 > SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout, float qScale, int32_t qOffset)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout layout)
uint32_t m_DilationY
Dilation along y axis.
LayerDescriptor m_Parameters
uint32_t m_DilationY
Dilation factor value for height dimension.
LayerTestResult< T, 4 > DepthwiseConvolution2dAsymmetricTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const boost::multi_array< T, 4 > &input, const boost::multi_array< T, 4 > &kernel, const boost::multi_array< B, 1 > &bias, const boost::multi_array< T, 4 > &outputExpected, float qScale, int32_t qOffset, const armnn::DataLayout layout, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1)
const ConstCpuTensorHandle * m_Weight
LayerTestResult< T, 4 > DepthwiseConvolution2dMult4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthMul64Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleConvolution2d3x3QSymm16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_PadTop
Padding top value in the height dimension.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< T, 4 > DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< T, 4 > DepthwiseConvolution2d3x3Dilation3x3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
#define ARMNN_ASSERT_MSG(COND, MSG)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
int32_t GetQuantizationOffset() const
LayerTestResult< T, 4 > DepthwiseConvolution2d3x3DilationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< float > &inputNoQuantizedValues, armnn::TensorInfo &inputTensorInfo, const std::vector< float > &kernelNoQuantizedValues, armnn::TensorInfo &kernelTensorInfo, const std::vector< float > &outputExpectedNoQuantizedValues, armnn::TensorInfo &outputTensorInfo, uint32_t dilationX, uint32_t dilationY, armnn::DataLayout layout=armnn::DataLayout::NCHW, bool biasEnabled=false)
float GetQuantizationScale() const
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
const ConstCpuTensorHandle * m_Weight
LayerTestResult< T, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
boost::multi_array< T, 1 > GetBias2(bool biasEnabled, float qScale)
LayerTestResult< float, 4 > SimpleConvolution2d3x5Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void SetQuantizationScale(float scale)
#define ARMNN_ASSERT(COND)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
armnn::DataLayout GetDataLayout() const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
boost::multi_array< T, 1 > GetBias4(bool biasEnabled, float qScale)
LayerTestResult< T, 4 > CompareDepthwiseConvolution2dTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, const armnnUtils::DataLayoutIndexed &layout)
LayerTestResult< T, 4 > SimpleConvolution2dTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const boost::multi_array< T, 4 > &originalInput, const boost::multi_array< T, 4 > &originalKernel, const boost::multi_array< B, 1 > &bias, const boost::multi_array< T, 4 > &originalOutputExpected, float qScale, int32_t qOffset, const armnn::DataLayout layout=armnn::DataLayout::NCHW, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1, uint32_t dilationX=1, uint32_t dilationY=1)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
DataType GetBiasDataType(DataType inputDataType)
LayerTestResult< uint8_t, 4 > SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< float, 4 > Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout layout)
LayerTestResult< O, 4 > SimpleConvolution2dNhwcTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const boost::multi_array< T, 4 > &input, const boost::multi_array< T, 4 > &kernel, const boost::multi_array< B, 1 > &bias, const boost::multi_array< O, 4 > &outputExpected, const armnn::DataLayout dataLayout, float qScale, int32_t qOffset, uint32_t padLeft=1, uint32_t padTop=1, uint32_t padRight=1, uint32_t padBottom=1, uint32_t strideX=1, uint32_t strideY=1)
boost::multi_array< T, n > output
LayerTestResult< float, 4 > SimpleConvolution2d3x3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< T, 4 > Convolution2d3x3Dilation3x3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_DilationX
Dilation along x axis.
boost::multi_array< T, 1 > GetBias8(bool biasEnabled, float qScale)
LayerTestResult< float, 4 > Convolution1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
LayerTestResult< T, 4 > SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dPerAxisQuantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
boost::multi_array< T, 1 > GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
LayerTestResult< T, 4 > DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled)
LayerTestResult< T, 4 > Convolution1dTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled)
LayerTestResult< int16_t, 4 > DepthwiseConvolution2dDepthMul1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
LayerTestResult< float, 4 > DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 4 > CompareDepthwiseConvolution2dFloatTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, const armnn::DataLayout layout)
float SelectiveDequantize(T value, float scale, int32_t offset)
LayerTestResult< float, 4 > SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< uint8_t, 4 > Convolution2dPerAxisQuantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 4 > CompareConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
LayerTestResult< T, 4 > SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< T, 4 > SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled)
LayerTestResult< int16_t, 4 > DepthwiseConvolution2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
unsigned int GetChannelsIndex() const
LayerTestResult< uint8_t, 4 > CompareDepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, const armnn::DataLayout layout)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > DepthwiseConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< float, 4 > SimpleConvolution2d3x3Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< T, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
uint32_t m_PadRight
Padding right value in the width dimension.
void PermuteTensorNhwcToNchw(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)