31 static std::vector<float> Bias2({0, 2});
33 static std::vector<float> Bias4({1, 2, 3, 4});
35 static std::vector<float> Bias8({1, 2, 3, 4, 1, 2, 3, 4});
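// Reference bias values for tests with 2, 4 and 8 output channels. The
// GetBias2/GetBias4/GetBias8 helpers further down quantize these on demand,
// or return an empty vector when the bias is disabled.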
38 static std::vector<float> ConvInput3x8x16({
39 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
40 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
41 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
42 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
43 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
44 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
45 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
46 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
47 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
48 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
49 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
50 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
51 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
52 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
53 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
54 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
55 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
56 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
57 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
58 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
59 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
60 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
61 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
62 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
72 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
73 std::vector<T> GetBias2(bool biasEnabled, float qScale)
77 return QuantizedVector<T>(Bias2, qScale, 0);
81 return std::vector<T>();
86 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
87 std::vector<T> GetBias4(bool biasEnabled, float qScale)
91 return QuantizedVector<T>(Bias4, qScale, 0);
95 return std::vector<T>();
100 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
101 std::vector<T> GetBias8(bool biasEnabled, float qScale)
105 return QuantizedVector<T>(Bias8, qScale, 0);
109 return std::vector<T>();
114 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
119 const unsigned int outputChannels = outputInfo.GetShape()[channelsIndex];
121 switch (outputChannels)
126 return GetBias2<ArmnnType>(biasEnabled, qScale);
130 return GetBias4<ArmnnType>(biasEnabled, qScale);
134 return GetBias8<ArmnnType>(biasEnabled, qScale);
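// Maps an input element type to the bias element type used by the reference
// implementations: the uint8_t specialisation below pairs quantized inputs
// with int32_t biases (the float specialisation's body is elided here, but it
// presumably keeps float biases).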
146 struct FullyConnectedBiasTypeForInputType;
149 struct FullyConnectedBiasTypeForInputType<float>
155 struct FullyConnectedBiasTypeForInputType<uint8_t>
157 using Type = int32_t;
161 template<typename T, typename B>
162 void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
163                const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
165 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
166 "Invalid type and parameter combination.");
167 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
168 "Invalid type and parameter combination.");
171 for (uint32_t i = 0; i < bias.size(); ++i)
174 for (uint32_t y = 0; y < h; ++y)
176 for (uint32_t x = 0; x < w; ++x)
178 uint32_t offset = (i * h + y) * w + x;
180 T& outRef = v[offset];
182 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
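// dOutput and dBias (computed in lines elided above) appear to be the
// dequantized output element and bias value; their sum is requantized with
// the output's scale/offset, so ApplyBias folds the bias directly into an
// already-quantized expected-output tensor.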
198 const std::vector<T>& originalInput,
199 const std::vector<T>& originalKernel,
200 const std::vector<B>& bias,
201 const std::vector<T>& originalOutputExpected,
208 uint32_t padLeft = 0,
210 uint32_t padRight = 0,
211 uint32_t padBottom = 0,
212 uint32_t strideX = 1,
213 uint32_t strideY = 1,
214 uint32_t dilationX = 1,
215 uint32_t dilationY = 1)
223 unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
224 unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
225 unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
226 unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);
233 bool biasEnabled = bias.size() > 0;
240 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
249 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
252 if(armnn::IsQuantizedType<T>())
254 inputTensorInfo.SetQuantizationScale(qScale);
255 inputTensorInfo.SetQuantizationOffset(qOffset);
265 std::vector<T> inputImage;
266 inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
267 std::vector<T> inputData;
268 inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
269 inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
275 std::vector<T> tmp(inputData.size());
276 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
280 std::vector<T> outputImage;
281 outputImage.assign(originalOutputExpected.data(),
282 originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
287 std::vector<T> biasV;
288 biasV.assign(bias.data(), bias.data() + outputChannels);
291 outputWidth, outputHeight);
298 std::vector<T> expectedOutput;
299 expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
300 expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
305 std::vector<T> tmp(expectedOutput.size());
307 expectedOutput = tmp;
310 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
311 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
319 std::vector<T> kernel = originalKernel;
331 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
332 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
335 data.m_Bias = &biasTensor;
350 inputHandle->Allocate();
351 outputHandle->Allocate();
355 ExecuteWorkload(*workload, memoryManager);
361 outputHandle->GetShape(),
372 const std::vector<T>& input,
373 const std::vector<T>& kernel,
374 const std::vector<B>& bias,
375 const std::vector<O>& outputExpected,
382 uint32_t padLeft = 1,
384 uint32_t padRight = 1,
385 uint32_t padBottom = 1,
386 uint32_t strideX = 1,
387 uint32_t strideY = 1)
405 bool biasEnabled = bias.size() > 0;
408 armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
409 armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
411 armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
412 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
415 std::vector<T> inputData;
416 inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
419 std::vector<O> outputData;
420 outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
422 std::vector<O> actualOutput(outputTensorInfo.GetNumElements());
424 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
425 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
435 data.m_Bias = &biasTensor;
446 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
447 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
452 inputHandle->Allocate();
453 outputHandle->Allocate();
457 ExecuteWorkload(*workload, memoryManager);
463 outputHandle->GetShape(),
464 outputTensorInfo.GetShape());
467 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
482 unsigned int batchSize = 1;
483 unsigned int inputChannels = 2;
484 unsigned int outputChannels = 3;
485 unsigned int inputSize = 5;
486 unsigned int kernelSize = 3;
487 unsigned int padSize = 2;
488 unsigned int stride = 1;
489 unsigned int outputSize = 7;
491 armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
492 armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
493 armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
497 if(armnn::IsQuantizedType<T>())
500 inputInfo.SetQuantizationOffset(qOffset);
501 outputInfo.SetQuantizationScale(qScale);
502 outputInfo.SetQuantizationOffset(qOffset);
503 kernelInfo.SetQuantizationScale(qScale);
504 kernelInfo.SetQuantizationOffset(qOffset);
505 biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
506 biasInfo.SetQuantizationOffset(0);
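// The bias is added to the int32 accumulator of input x kernel products, so
// its scale must be the product of the input and kernel scales with a zero
// offset - which is exactly what the two lines above set up.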
509 std::vector<T> inputData = QuantizedVector<T>(
511 5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
512 -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
514 inputInfo.GetQuantizationScale(),
515 inputInfo.GetQuantizationOffset());
517 std::vector<T> kernelData = QuantizedVector<T>(
528 kernelInfo.GetQuantizationScale(),
529 kernelInfo.GetQuantizationOffset());
531 std::vector<B> biasData =
532 QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());
534 std::vector<T> outputData = QuantizedVector<T>(
536 4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
537 -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
538 2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
540 outputInfo.GetQuantizationScale(),
541 outputInfo.GetQuantizationOffset());
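// The expected values above are written out as the raw per-tap convolution
// sums; ApplyBias below then folds the (quantized) bias into them, mirroring
// what the workload under test is expected to compute.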
543 std::vector<T> actualOutput(outputInfo.GetNumElements());
548 ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
549 biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
553 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
554 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
564 AddInputToWorkload(data, info, inputInfo, inputHandle.get());
565 AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
568 data.m_Bias = &biasTensor;
580 inputHandle->Allocate();
581 outputHandle->Allocate();
585 ExecuteWorkload(*workload, memoryManager);
591 outputHandle->GetShape(),
592 outputInfo.GetShape());
595 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
609 std::vector<T> input =
618 std::vector<T> kernel =
627 const std::vector<float> outputData =
634 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
642 inputDesc.GetShape(),
643 kernelDesc.GetShape(),
644 outputDesc.GetShape(),
650 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
664 std::vector<T> input =
675 std::vector<T> kernel =
684 std::vector<T> outputData =
691 uint32_t padLeft = 1;
693 uint32_t padRight = 1;
694 uint32_t padBottom = 1;
695 uint32_t strideX = 2;
696 uint32_t strideY = 2;
698 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
706 inputDesc.GetShape(),
707 kernelDesc.GetShape(),
708 outputDesc.GetShape(),
720 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
732 std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
736 std::vector<T> kernel = QuantizedVector<T>({
778 std::vector<T> expectedOutput = QuantizedVector<T>({
779 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
780 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
781 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
782 -23.5f, -23.5f, -23.5f,
783 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
784 -23.5f, -23.5f, -23.5f,
786 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
787 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
788 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
789 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
793 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
799 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
801 inputDesc.GetShape(),
802 kernelDesc.GetShape(),
803 outputDesc.GetShape(),
824 std::vector<unsigned int> inputShape = { 1, 3, 8, 16 };
825 std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
829 std::vector<T> kernel = QuantizedVector<T>({
859 std::vector<T> expectedOutput = QuantizedVector<T>({
860 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
861 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
862 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
863 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
864 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
865 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
867 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
868 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
869 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
870 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
871 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
872 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
876 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
882 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
884 inputDesc.GetShape(),
885 kernelDesc.GetShape(),
886 outputDesc.GetShape(),
904 std::vector<T> input =
914 std::vector<T> kernel =
931 std::vector<T> expectedOutput =
934 -242, -594, -934, -372, 0, 0,
935 -495, -1190, -1850, -725, 0, 0,
936 -538, -1256, -1916, -748, 0, 0,
937 -273, -626, -946, -363, 0, 0,
944 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
950 GetBias2<ArmnnBType>(false, qScale * qScale),
952 inputDesc.GetShape(),
953 kernelDesc.GetShape(),
954 outputDesc.GetShape(),
976 std::vector<T> input =
987 std::vector<T> kernel =
998 std::vector<T> expectedOutput =
1000 -7140, -10580, -13940, -9300, -5230,
1001 -9590, -14120, -18520, -12290, -6860,
1002 -9980, -14560, -18960, -12560, -7000,
1003 -7518, -10904, -14144, -9318, -5152,
1004 -5032, -7256, -9376, -6142, -3368,
1008 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1011 tensorHandleFactory,
1014 GetBias2<ArmnnBType>(false, qScale * qScale),
1016 inputDesc.GetShape(),
1017 kernelDesc.GetShape(),
1018 outputDesc.GetShape(),
1028 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1033 const std::vector<float>& inputNoQuantizedValues,
1035 const std::vector<float>& kernelNoQuantizedValues,
1037 const std::vector<float>& outputExpectedNoQuantizedValues,
1042 uint32_t padLeft = 0,
1043 uint32_t padTop = 0,
1044 uint32_t padRight = 0,
1045 uint32_t padBottom = 0,
1046 uint32_t strideX = 1,
1047 uint32_t strideY = 1,
1048 bool biasEnabled = false
1084 auto input = QuantizedVector<T>(inputNoQuantizedValues,
1087 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
1090 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
1094 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1097 tensorHandleFactory,
1100 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1118 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1127 std::vector<float> inputNoQuantizedValues =
1129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1132 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1133 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1134 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1138 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1142 std::vector<float> kernelNoQuantizedValues =
1152 std::vector<float> outputExpectedNoQuantizedValues =
1160 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1163 tensorHandleFactory,
1164 inputNoQuantizedValues,
1166 kernelNoQuantizedValues,
1168 outputExpectedNoQuantizedValues,
1176 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1185 std::vector<float> inputNoQuantizedValues =
1187 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1188 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1190 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1191 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1192 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1193 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1194 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1196 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1198 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1199 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1200 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1201 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1202 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1203 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1204 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1206 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1207 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1211 std::vector<float> kernelNoQuantizedValues =
1225 std::vector<float> outputExpectedNoQuantizedValues =
1233 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1236 tensorHandleFactory,
1237 inputNoQuantizedValues,
1239 kernelNoQuantizedValues,
1241 outputExpectedNoQuantizedValues,
1249 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1258 std::vector<float> inputNoQuantizedValues =
1260 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1261 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1262 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1263 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1264 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1265 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1266 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1267 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1268 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1269 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
1273 std::vector<float> kernelNoQuantizedValues =
1283 std::vector<float> outputExpectedNoQuantizedValues =
1290 uint32_t padLeft = 1;
1291 uint32_t padTop = 1;
1292 uint32_t padRight = 1;
1293 uint32_t padBottom = 1;
1295 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1298 tensorHandleFactory,
1299 inputNoQuantizedValues,
1301 kernelNoQuantizedValues,
1303 outputExpectedNoQuantizedValues,
1318 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1326 unsigned int inputHeight = 8;
1327 unsigned int inputWidth = 16;
1328 unsigned int inputChannels = 3;
1329 unsigned int inputNum = 5;
1331 unsigned int kernelHeight = 3;
1332 unsigned int kernelWidth = 3;
1334 unsigned int strideX = 2;
1335 unsigned int strideY = 3;
1336 unsigned int padX = 1;
1337 unsigned int padY = 1;
1339 unsigned int outputNum = inputNum;
1340 unsigned int outputChannels = 2;
1341 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
1342 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
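// With the values above: outputHeight = (8 + 2*1 - 3 + 3) / 3 = 3 and
// outputWidth = (16 + 2*1 - 3 + 2) / 2 = 8 (integer division), so each of the
// two output channels is a 3 x 8 feature map.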
1349 unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
1350 unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
1351 unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
1352 unsigned int biasShape[] = {outputChannels};
1359 auto input = MakeRandomTensor<T>(inputTensorInfo, 124908);
1360 auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
1361 auto bias = MakeRandomTensor<T>(biasDesc, 1028);
1364 std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
1366 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1367 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1377 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1378 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1380 data.m_Bias = &biasTensor;
1389 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1390 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1394 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1395 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1397 std::unique_ptr<armnn::IWorkload> workload
1399 std::unique_ptr<armnn::IWorkload> workloadRef
1402 outputHandleRef->Allocate();
1403 inputHandleRef->Allocate();
1405 inputHandle->Allocate();
1406 outputHandle->Allocate();
1411 ExecuteWorkload(*workload, memoryManager);
1413 workloadRef->PostAllocationConfigure();
1414 workloadRef->Execute();
1421 outputHandle->GetShape(),
1438 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1471 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1489 const std::vector<float> outputData =
1502 uint32_t padLeft = 1;
1503 uint32_t padTop = 1;
1504 uint32_t padRight = 1;
1505 uint32_t padBottom = 1;
1506 uint32_t strideX = 2;
1507 uint32_t strideY = 2;
1513 tensorHandleFactory,
1516 std::vector<float>(),
1518 inputDesc.GetShape(),
1519 kernelDesc.GetShape(),
1520 outputDesc.GetShape(),
1545 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1578 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1596 const std::vector<float> outputData =
1609 uint32_t padLeft = 1;
1610 uint32_t padTop = 1;
1611 uint32_t padRight = 1;
1612 uint32_t padBottom = 1;
1613 uint32_t strideX = 2;
1614 uint32_t strideY = 2;
1620 tensorHandleFactory,
1623 std::vector<float>(),
1625 inputDesc.GetShape(),
1626 kernelDesc.GetShape(),
1627 outputDesc.GetShape(),
1649 const std::vector<T>& input,
1650 const std::vector<T>& kernel,
1651 const std::vector<B>& bias,
1652 const std::vector<T>& outputExpected,
1659 uint32_t padLeft = 0,
1660 uint32_t padTop = 0,
1661 uint32_t padRight = 0,
1662 uint32_t padBottom = 0,
1663 uint32_t strideX = 1,
1664 uint32_t strideY = 1)
1679 bool biasEnabled = bias.size() > 0;
1680 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
1687 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
1688 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
1691 if (armnn::IsQuantizedType<T>())
1693 inputTensorInfo.SetQuantizationScale(qScale);
1694 inputTensorInfo.SetQuantizationOffset(qOffset);
1697 kernelDesc.SetQuantizationScale(qScale);
1698 kernelDesc.SetQuantizationOffset(qOffset);
1699 biasDesc.SetQuantizationScale(qScale*qScale);
1700 biasDesc.SetQuantizationOffset(0);
1704 std::vector<T> inputData;
1705 inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);
1711 std::vector<T> tmp(inputData.size());
1712 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1717 std::vector<T> outputData;
1718 outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
1721 std::vector<T> biasV;
1722 biasV.assign(bias.data(), bias.data() + outputChannels);
1724 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1725 outputWidth, outputHeight);
1733 std::vector<T> tmp(outputData.size());
1738 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1739 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1753 data.m_Bias = &biasTensor;
1754 data.m_Parameters.m_StrideX = strideX;
1755 data.m_Parameters.m_StrideY = strideY;
1756 data.m_Parameters.m_PadLeft = padLeft;
1757 data.m_Parameters.m_PadRight = padRight;
1758 data.m_Parameters.m_PadTop = padTop;
1759 data.m_Parameters.m_PadBottom = padBottom;
1760 data.m_Parameters.m_BiasEnabled = biasEnabled;
1761 data.m_Parameters.m_DataLayout = layout;
1764 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1765 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1767 std::unique_ptr<armnn::IWorkload> workload
1769 inputHandle->Allocate();
1770 outputHandle->Allocate();
1774 ExecuteWorkload(*workload, memoryManager);
1780 outputHandle->GetShape(),
1784 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1796 unsigned int inputHeight = 3;
1797 unsigned int inputWidth = 3;
1798 unsigned int inputChannels = 2;
1799 unsigned int inputNum = 1;
1801 unsigned int kernelHeight = 3;
1802 unsigned int kernelWidth = 3;
1804 unsigned int outputHeight = 1;
1805 unsigned int outputWidth = 1;
1806 unsigned int outputChannels = inputChannels;
1807 unsigned int outputNum = inputNum;
1818 if(armnn::IsQuantizedType<T>())
1824 kernelDesc.SetQuantizationScale(qScale);
1825 kernelDesc.SetQuantizationOffset(qOffset);
1826 biasDesc.SetQuantizationScale(qScale*qScale);
1827 biasDesc.SetQuantizationOffset(0);
1829 std::vector<T> inputData = std::vector<T>(
1830 QuantizedVector<T>({
1846 std::vector<T> tmp(inputData.size());
1851 std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
1852 biasDesc.GetQuantizationScale(),
1853 biasDesc.GetQuantizationOffset()));
1855 std::vector<T> kernelData = std::vector<T>(
1856 QuantizedVector<T>({
1865 kernelDesc.GetQuantizationScale(),
1866 kernelDesc.GetQuantizationOffset()));
1869 std::vector<T> outputImage(
1870 QuantizedVector<T>({ 0.f, 0.f },
1879 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1880 outputWidth, outputHeight);
1885 std::vector<T> tmp(outputImage.size());
1892 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1893 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1903 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1904 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1907 data.m_Bias = &biasTensor;
1917 std::unique_ptr<armnn::IWorkload> workload
1919 inputHandle->Allocate();
1920 outputHandle->Allocate();
1924 ExecuteWorkload(*workload, memoryManager);
1930 outputHandle->GetShape(),
1934 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1946 unsigned int depthMultiplier = 2;
1948 unsigned int inputHeight = 8;
1949 unsigned int inputWidth = 16;
1950 unsigned int inputChannels = 2;
1951 unsigned int inputBatchSize = 1;
1953 unsigned int kernelHeight = 5;
1954 unsigned int kernelWidth = 3;
1956 unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
1957 unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
1958 unsigned int outputChannels = inputChannels * depthMultiplier;
1959 unsigned int outputBatchSize = inputBatchSize;
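// With the values above: outputHeight = 8 - 5 + 1 + 2 = 6,
// outputWidth = (16 - 3 + 1) / 2 = 7, and outputChannels = 2 * 2 = 4
// (two input channels, depth multiplier 2).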
1962 inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1964 outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
1970 if(armnn::IsQuantizedType<T>())
1976 kernelDesc.SetQuantizationScale(qScale);
1977 kernelDesc.SetQuantizationOffset(qOffset);
1978 biasDesc.SetQuantizationScale(qScale*qScale);
1979 biasDesc.SetQuantizationOffset(0);
1983 std::vector<T> originalInputData = std::vector<T>(
1984 QuantizedVector<T>({
1985 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1986 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1987 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1988 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1989 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1990 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1991 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1992 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1993 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1994 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1995 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1996 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1997 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1998 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1999 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2000 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
2005 std::vector<T> inputData = originalInputData;
2011 originalInputData.data(), inputData.data(), sizeof(T));
2014 std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
2015 biasDesc.GetQuantizationScale(),
2016 biasDesc.GetQuantizationOffset());
2018 std::vector<T> kernelData = std::vector<T>(
2019 QuantizedVector<T>({
2044 kernelDesc.GetQuantizationScale(),
2045 kernelDesc.GetQuantizationOffset()));
2048 std::vector<T> originalOutputImage = std::vector<T>(
2049 QuantizedVector<T>({
2050 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2051 5, 5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5,
2052 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5, 5, 5, 5, 5, 5, 5,
2053 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5,
2054 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 6, 6, 6, 6, 6, 6, 6,
2055 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
2056 1, 3, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2057 2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2058 2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2059 2, 4, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
2060 3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
2061 3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0
2073 biasDesc.GetQuantizationScale(),
2074 biasDesc.GetQuantizationOffset(),
2079 std::vector<T> outputImage = originalOutputImage;
2083 originalOutputImage.data(), outputImage.data(), sizeof(T));
2088 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2089 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2099 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2100 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2103 data.m_Bias = &biasTensor;
2113 std::unique_ptr<armnn::IWorkload> workload
2115 inputHandle->Allocate();
2116 outputHandle->Allocate();
2120 ExecuteWorkload(*workload, memoryManager);
2126 outputHandle->GetShape(),
2137 const std::vector<T>& originalInput,
2138 const std::vector<T>& originalKernel,
2139 const std::vector<B>& bias,
2140 const std::vector<T>& originalOutputExpected,
2147 uint32_t padLeft = 0,
2148 uint32_t padTop = 0,
2149 uint32_t padRight = 0,
2150 uint32_t padBottom = 0,
2151 uint32_t strideX = 1,
2152 uint32_t strideY = 1,
2153 uint32_t dilationX = 1,
2154 uint32_t dilationY = 1)
2161 unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
2162 unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
2163 unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
2164 unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);
2170 bool biasEnabled = bias.size() > 0;
2177 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
2187 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
2189 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
2192 if(armnn::IsQuantizedType<T>())
2194 inputTensorInfo.SetQuantizationScale(qScale);
2195 inputTensorInfo.SetQuantizationOffset(qOffset);
2198 kernelDesc.SetQuantizationScale(qScale);
2199 kernelDesc.SetQuantizationOffset(qOffset);
2200 biasDesc.SetQuantizationScale(qScale*qScale);
2201 biasDesc.SetQuantizationOffset(0);
2205 std::vector<T> input;
2206 input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
2207 std::vector<T> inputData;
2208 inputData.insert(inputData.end(), input.begin(), input.end());
2209 inputData.insert(inputData.end(), input.begin(), input.end());
2215 std::vector<T> tmp(inputData.size());
2216 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
2220 std::vector<T> output;
2221 output.assign(originalOutputExpected.data(),
2222 originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
2227 std::vector<T> biasV;
2228 biasV.assign(bias.data(), bias.data() + outputChannels);
2230 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
2231 outputWidth, outputHeight);
2237 std::vector<T> outputData;
2238 outputData.insert(outputData.end(), output.begin(), output.end());
2239 outputData.insert(outputData.end(), output.begin(), output.end());
2244 std::vector<T> tmp(outputData.size());
2249 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2250 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2264 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2265 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2268 data.m_Bias = &biasTensor;
2280 std::unique_ptr<armnn::IWorkload> workload
2282 inputHandle->Allocate();
2283 outputHandle->Allocate();
2287 ExecuteWorkload(*workload, memoryManager);
2293 outputHandle->GetShape(),
2310 auto input = QuantizedVector<T>(
2324 inputTensorInfo.GetQuantizationScale(),
2325 inputTensorInfo.GetQuantizationOffset());
2330 auto kernel = QuantizedVector<T>({
2341 kernelTensorInfo.GetQuantizationScale(),
2342 kernelTensorInfo.GetQuantizationOffset());
2347 auto expectedOutput = QuantizedVector<T>(
2349 396, 664, 820, 756, 602, 1016, 1608, 1880, 1652, 1268, 1976, 2968, 3240, 2732,
2350 2028, 2628, 3808, 4060, 3312, 2390, 2596, 3700, 3900, 3130, 2226, 2817, 4186,
2351 4330, 3609, 2651, 5414, 7864, 8120, 6626, 4780, 6314, 9144, 9400, 7646, 5500,
2352 6759, 9610, 9850, 7875, 5579, 5935, 8348, 8540, 6757, 4742
2354 outputTensorInfo.GetQuantizationScale(),
2355 outputTensorInfo.GetQuantizationOffset());
2357 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
2360 tensorHandleFactory,
2363 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2365 inputTensorInfo.GetShape(),
2366 kernelTensorInfo.GetShape(),
2367 outputTensorInfo.GetShape(),
2392 auto input = QuantizedVector<T>(
2406 inputTensorInfo.GetQuantizationScale(),
2407 inputTensorInfo.GetQuantizationOffset());
2410 auto kernel = QuantizedVector<T>({
2421 kernelTensorInfo.GetQuantizationScale(),
2422 kernelTensorInfo.GetQuantizationOffset());
2425 auto expectedOutput = QuantizedVector<T>(
2427 396,664,820,756,602,
2428 1016,1608,1880,1652,1268,
2429 1976,2968,3240,2732,2028,
2430 2628,3808,4060,3312,2390,
2431 2596,3700,3900,3130,2226,
2433 2817,4186,4330,3609,2651,
2434 5414,7864,8120,6626,4780,
2435 6314,9144,9400,7646,5500,
2436 6759,9610,9850,7875,5579,
2437 5935,8348,8540,6757,4742
2439 outputTensorInfo.GetQuantizationScale(),
2440 outputTensorInfo.GetQuantizationOffset());
2442 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2445 tensorHandleFactory,
2448 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2450 inputTensorInfo.GetShape(),
2451 kernelTensorInfo.GetShape(),
2452 outputTensorInfo.GetShape(),
2477 auto input = QuantizedVector<T>(
2479 0, 0, 0, 0, 0, 0, 0, 0, 0,
2480 0, 0, 0, 0, 0, 0, 0, 0, 0,
2481 0, 0, 0, 0, 0, 0, 0, 0, 0,
2482 0, 0, 0, 1, 1, 1, 0, 0, 0,
2483 0, 0, 0, 1, 1, 1, 0, 0, 0,
2484 0, 0, 0, 1, 1, 1, 0, 0, 0,
2485 0, 0, 0, 0, 0, 0, 0, 0, 0,
2486 0, 0, 0, 0, 0, 0, 0, 0, 0,
2487 0, 0, 0, 0, 0, 0, 0, 0, 0
2489 inputTensorInfo.GetQuantizationScale(),
2490 inputTensorInfo.GetQuantizationOffset());
2493 auto kernel = QuantizedVector<T>({
2498 kernelTensorInfo.GetQuantizationScale(),
2499 kernelTensorInfo.GetQuantizationOffset());
2501 uint32_t padLeft = 0;
2502 uint32_t padTop = 0;
2503 uint32_t padRight = 0;
2504 uint32_t padBottom = 0;
2505 uint32_t strideX = 1;
2506 uint32_t strideY = 1;
2507 uint32_t dilationX = 3;
2508 uint32_t dilationY = 3;
2512 auto expectedOutput = QuantizedVector<T>(
2518 outputTensorInfo.GetQuantizationScale(),
2519 outputTensorInfo.GetQuantizationOffset());
2521 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2524 tensorHandleFactory,
2527 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2529 inputTensorInfo.GetShape(),
2530 kernelTensorInfo.GetShape(),
2531 outputTensorInfo.GetShape(),
2545 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
2550 const std::vector<float>& inputNoQuantizedValues,
2552 const std::vector<float>& kernelNoQuantizedValues,
2554 const std::vector<float>& outputExpectedNoQuantizedValues,
2559 bool biasEnabled = false)
2594 auto input = QuantizedVector<T>(inputNoQuantizedValues,
2597 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
2600 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
2604 uint32_t padLeft = 0;
2605 uint32_t padTop = 0;
2606 uint32_t padRight = 0;
2607 uint32_t padBottom = 0;
2608 uint32_t strideX = 1;
2609 uint32_t strideY = 1;
2611 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2614 tensorHandleFactory,
2617 GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
2621 outputTensorInfo.GetShape(),
2635 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2644 std::vector<float> inputNoQuantizedValues =
2646 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2647 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2648 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2649 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2650 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2651 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2652 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2653 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2654 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2655 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2659 std::vector<float> kernelNoQuantizedValues =
2669 std::vector<float> outputExpectedNoQuantizedValues =
2677 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2680 tensorHandleFactory,
2681 inputNoQuantizedValues,
2683 kernelNoQuantizedValues,
2685 outputExpectedNoQuantizedValues,
2693 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2702 std::vector<float> inputNoQuantizedValues =
2704 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2705 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2706 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2707 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2708 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2709 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2710 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2711 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2712 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2713 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2715 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2716 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2717 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2718 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2719 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2720 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2721 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2722 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2723 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2724 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2728 std::vector<float> kernelNoQuantizedValues =
2742 std::vector<float> outputExpectedNoQuantizedValues =
2744 2, 9, 9, 9, 2, 9, 9, 9, 2, 9, 9, 9, 5, 3, 3, 3, 3,
2746 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 6, 4, 4, 4
2749 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2752 tensorHandleFactory,
2753 inputNoQuantizedValues,
2755 kernelNoQuantizedValues,
2757 outputExpectedNoQuantizedValues,
2765 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2774 std::vector<float> inputNoQuantizedValues =
2787 std::vector<float> kernelNoQuantizedValues =
2815 std::vector<float> outputExpectedNoQuantizedValues =
2817 4.5f, 4.5f, 4.5f, 4.5f, 5.5f, 5.5f, 5.5f, 5.5f,
2818 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f,
2819 10.05f, 10.5f, 11.4f, 11.85f, 12.75f, 13.3f, 14.4f, 14.95f,
2820 5.25f, 5.5f, 6.0f, 6.25f, 7.45f, 7.8f, 8.5f, 8.85f
2824 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2827 tensorHandleFactory,
2828 inputNoQuantizedValues,
2830 kernelNoQuantizedValues,
2832 outputExpectedNoQuantizedValues,
2840 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2849 std::vector<float> inputNoQuantizedValues =
2862 std::vector<float> kernelNoQuantizedValues =
2879 std::vector<float> outputExpectedNoQuantizedValues =
2881 4.5f, 4.5f, 4.5f, 4.5f,
2882 5.5f, 5.5f, 5.5f, 5.5f,
2883 5.25f, 5.5f, 6.0f, 6.25f,
2884 7.65f, 8.0f, 8.7f, 9.05f
2888 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2891 tensorHandleFactory,
2892 inputNoQuantizedValues,
2894 kernelNoQuantizedValues,
2896 outputExpectedNoQuantizedValues,
2904 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2913 unsigned int inputHeight = 8;
2914 unsigned int inputWidth = 16;
2915 unsigned int inputChannels = 3;
2916 unsigned int inputNum = 5;
2918 unsigned int kernelHeight = 3;
2919 unsigned int kernelWidth = 3;
2920 unsigned int channelMultiplier = 1;
2922 unsigned int strideX = 2;
2923 unsigned int strideY = 3;
2924 unsigned int padX = 1;
2925 unsigned int padY = 1;
2927 unsigned int outputNum = inputNum;
2928 unsigned int outputChannels = inputChannels * channelMultiplier;
2929 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
2930 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
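// With the values above: outputHeight = (8 + 2*1 - 3 + 3) / 3 = 3,
// outputWidth = (16 + 2*1 - 3 + 2) / 2 = 8, and outputChannels = 3 * 1 = 3.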
2937 std::vector<unsigned int> inputShape;
2938 std::vector<unsigned int> outputShape;
2939 std::vector<unsigned int> kernelShape{ 1, kernelHeight, kernelWidth, outputChannels };
2940 std::vector<unsigned int> biasShape{ outputChannels };
2944 inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
2945 outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
2948 inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
2949 outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
2953 + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
2956 float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
2957 float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
2958 int32_t qOffset = 0;
2960 inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
2961 outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
2962 kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
2965 auto input = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
2966 auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
2967 auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);
2970 std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
2972 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2973 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2983 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2984 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2986 data.m_Bias = &biasTensor;
2996 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2997 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
3001 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
3002 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3004 std::unique_ptr<armnn::IWorkload> workload
3006 std::unique_ptr<armnn::IWorkload> workloadRef
3009 outputHandleRef->Allocate();
3010 inputHandleRef->Allocate();
3012 inputHandle->Allocate();
3013 outputHandle->Allocate();
3018 ExecuteWorkload(*workload, memoryManager);
3020 workloadRef->PostAllocationConfigure();
3021 workloadRef->Execute();
3028 outputHandle->GetShape(),
3036 Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3044 Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3045 armnn::IWorkloadFactory&,
3046 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3047 const armnn::ITensorHandleFactory&,
3052 Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3053 armnn::IWorkloadFactory&,
3054 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3055 const armnn::ITensorHandleFactory&,
3060 Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3061 armnn::IWorkloadFactory&,
3062 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3063 const armnn::ITensorHandleFactory&,
3068 Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3069 armnn::IWorkloadFactory&,
3070 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3071 const armnn::ITensorHandleFactory&,
3075 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3076 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3077 armnn::IWorkloadFactory&,
3078 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3079 const armnn::ITensorHandleFactory&,
3083 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3084 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3085 armnn::IWorkloadFactory&,
3086 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3087 const armnn::ITensorHandleFactory&,
3091 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3092 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3093 armnn::IWorkloadFactory&,
3094 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3095 const armnn::ITensorHandleFactory&,
3099 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3100 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3101 armnn::IWorkloadFactory&,
3102 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3103 const armnn::ITensorHandleFactory&,
3107 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3108 Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3109 armnn::IWorkloadFactory&,
3110 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3111 const armnn::ITensorHandleFactory&,
3115 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3116 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3117 armnn::IWorkloadFactory &workloadFactory,
3118 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3119 const armnn::ITensorHandleFactory& tensorHandleFactory,
3123 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3124 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3125 armnn::IWorkloadFactory &workloadFactory,
3126 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3127 const armnn::ITensorHandleFactory& tensorHandleFactory,
3131 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3132 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3133 armnn::IWorkloadFactory &workloadFactory,
3134 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3135 const armnn::ITensorHandleFactory& tensorHandleFactory,
3139 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3140 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3141 armnn::IWorkloadFactory &workloadFactory,
3142 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3143 const armnn::ITensorHandleFactory& tensorHandleFactory,
3147 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3148 Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3149 armnn::IWorkloadFactory &workloadFactory,
3150 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3151 const armnn::ITensorHandleFactory& tensorHandleFactory,
3155 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3156 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3157 armnn::IWorkloadFactory&,
3158 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3159 const armnn::ITensorHandleFactory&,
3163 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3164 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3165 armnn::IWorkloadFactory&,
3166 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3167 const armnn::ITensorHandleFactory&,
3171 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3172 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3173 armnn::IWorkloadFactory&,
3174 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3175 const armnn::ITensorHandleFactory&,
3179 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3180 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3181 armnn::IWorkloadFactory&,
3182 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3183 const armnn::ITensorHandleFactory&,
3187 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3188 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3189 armnn::IWorkloadFactory&,
3190 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3191 const armnn::ITensorHandleFactory&,
3195 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3196 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3197 armnn::IWorkloadFactory&,
3198 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3199 const armnn::ITensorHandleFactory&,
3203 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3204 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3205 armnn::IWorkloadFactory&,
3206 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3207 const armnn::ITensorHandleFactory&,
3211 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3212 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3213 armnn::IWorkloadFactory&,
3214 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3215 const armnn::ITensorHandleFactory&,
3219 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3220 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3221 armnn::IWorkloadFactory&,
3222 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3223 const armnn::ITensorHandleFactory&,
3227 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3228 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3229 armnn::IWorkloadFactory&,
3230 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
3231 const armnn::ITensorHandleFactory&,
3235 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3236 DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3237 armnn::IWorkloadFactory &workloadFactory,
3238 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3239 const armnn::ITensorHandleFactory& tensorHandleFactory,
3243 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3244 DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3245 armnn::IWorkloadFactory &workloadFactory,
3246 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3247 const armnn::ITensorHandleFactory& tensorHandleFactory,
3251 template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3252 DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3253 armnn::IWorkloadFactory &workloadFactory,
3254 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3255 const armnn::ITensorHandleFactory& tensorHandleFactory,
3259 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3260 DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3261 armnn::IWorkloadFactory &workloadFactory,
3262 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
3263 const armnn::ITensorHandleFactory& tensorHandleFactory,
3272 armnn::IWorkloadFactory& workloadFactory,
3273 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3274 const armnn::ITensorHandleFactory& tensorHandleFactory,
3278 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3279 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
3283 armnn::IWorkloadFactory& workloadFactory,
3284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3285 const armnn::ITensorHandleFactory& tensorHandleFactory,
3289 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3290 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3294 armnn::IWorkloadFactory& workloadFactory,
3295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3296 const armnn::ITensorHandleFactory& tensorHandleFactory,
3300 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3301 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
3305 armnn::IWorkloadFactory& workloadFactory,
3306 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3307 const armnn::ITensorHandleFactory& tensorHandleFactory,
3310 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3313 tensorHandleFactory,
3321 armnn::IWorkloadFactory& workloadFactory,
3322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3323 const armnn::ITensorHandleFactory& tensorHandleFactory,
3327 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3330 tensorHandleFactory,
3338 armnn::IWorkloadFactory& workloadFactory,
3339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3340 const armnn::ITensorHandleFactory& tensorHandleFactory,
3344 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3345 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3349 armnn::IWorkloadFactory& workloadFactory,
3350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3351 const armnn::ITensorHandleFactory& tensorHandleFactory,
3355 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3356 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3360 armnn::IWorkloadFactory& workloadFactory,
3361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3362 const armnn::ITensorHandleFactory& tensorHandleFactory,
3366 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3367 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3371 armnn::IWorkloadFactory& workloadFactory,
3372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3373 const armnn::ITensorHandleFactory& tensorHandleFactory,
3376 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3377 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
3381 armnn::IWorkloadFactory& workloadFactory,
3382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3383 const armnn::ITensorHandleFactory& tensorHandleFactory,
3388 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
3392 armnn::IWorkloadFactory& workloadFactory,
3393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3394 const armnn::ITensorHandleFactory& tensorHandleFactory,
3397 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3398 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
3402 armnn::IWorkloadFactory& workloadFactory,
3403 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3404 const armnn::ITensorHandleFactory& tensorHandleFactory,
3407 return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3408 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
3412 armnn::IWorkloadFactory& workloadFactory,
3413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3414 const armnn::ITensorHandleFactory& tensorHandleFactory,
3417 using namespace armnn;
3419 const DataType inputType = DataType::QAsymmU8;
3420 const DataType kernelType = DataType::QSymmS8;
3421 const DataType biasType = DataType::Signed32;
3423 TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
3424 TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);
3426 const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
3427 constexpr unsigned int quantDimension = 0;
3429 TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
3431 const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
3432 TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
3434 std::vector<uint8_t> inputData =
3436 138, 108, 138, 108, 138, 108
3439 std::vector<int8_t> kernelData =
3444 std::vector<int32_t> biasData =
3449 std::vector<uint8_t> expectedOutputData =
3451 121, 118, 115, 121, 118, 115, 121, 118, 115
3461 std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3465 descriptor.m_StrideY = 1;
3466 descriptor.m_PadLeft = 0;
3467 descriptor.m_PadRight = 0;
3468 descriptor.m_PadTop = 0;
3469 descriptor.m_PadBottom = 0;
3470 descriptor.m_BiasEnabled = true;
3471 descriptor.m_DataLayout = layout;
3473 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
3474 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
3485 queueDescriptor.m_Weight = &weightTensor;
3486 queueDescriptor.m_Bias = &biasTensor;
3488 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3489 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3494 inputHandle->Allocate();
3495 outputHandle->Allocate();
3499 ExecuteWorkload(*workload, memoryManager);
3505 outputHandle->GetShape(),
3506 outputInfo.GetShape());
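In the per-axis test above, the kernel is quantized with one scale per output channel (quantDimension 0 of its { 3, 1, 1, 2 } shape), and the bias scales are the product of the input scale and each kernel scale. A minimal sketch of that relationship, assuming the usual per-channel bias-scale rule (the helper name below is hypothetical, not from the original source):

    #include <vector>

    // Derive per-channel bias scales as biasScale[c] = inputScale * kernelScale[c].
    std::vector<float> MakePerChannelBiasScales(float inputScale, const std::vector<float>& kernelScales)
    {
        std::vector<float> biasScales;
        biasScales.reserve(kernelScales.size());
        for (float kernelScale : kernelScales)
        {
            biasScales.push_back(inputScale * kernelScale);
        }
        return biasScales;
    }

    // MakePerChannelBiasScales(0.5f, { 0.5f, 0.75f, 1.0f }) yields { 0.25f, 0.375f, 0.5f },
    // matching the biasQuantScales used for biasInfo above.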
3510 armnn::IWorkloadFactory& workloadFactory,
3511 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3512 armnn::IWorkloadFactory& refWorkloadFactory,
3513 const armnn::ITensorHandleFactory& tensorHandleFactory,
3514 const armnn::ITensorHandleFactory& refTensorHandleFactory)
3516 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
3517 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
3521 armnn::IWorkloadFactory& workloadFactory,
3522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3523 const armnn::ITensorHandleFactory& tensorHandleFactory,
3527 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3528 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3532 armnn::IWorkloadFactory& workloadFactory,
3533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3534 const armnn::ITensorHandleFactory& tensorHandleFactory,
3537 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3538 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
3542 armnn::IWorkloadFactory& workloadFactory,
3543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3544 const armnn::ITensorHandleFactory& tensorHandleFactory,
3548 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3549 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3553 armnn::IWorkloadFactory& workloadFactory,
3554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3555 const armnn::ITensorHandleFactory& tensorHandleFactory)
3558 std::vector<float> input = { 1.f, 2.f, 3.f, 4.f };
3560 std::vector<float> kernelData;
3561 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
3562 for (unsigned int i = 0; i < 64; ++i)
3564 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
3571 std::vector<float> kernelPermuted(kernelTensorInfo.GetNumElements());
3573 kernelData.data(), kernelPermuted.data(),
3576 std::vector<float> expectedOutputData(64, 0.f);
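// Aside (not part of the original source): each of the 64 depth channels applies the same
// 2x2 kernel { 1, -1, -1, 1 } to the 2x2 input { 1, 2, 3, 4 }; the single valid position
// gives 1*1 + 2*(-1) + 3*(-1) + 4*1 = 0, which is why expectedOutputData is all zeros.
// A minimal check of that arithmetic (local names chosen to avoid clashing with the test):
float depthMul64Check = 0.0f;
const float checkInput[4]  = { 1.0f,  2.0f,  3.0f, 4.0f };
const float checkKernel[4] = { 1.0f, -1.0f, -1.0f, 1.0f };
for (unsigned int j = 0; j < 4; ++j)
{
    depthMul64Check += checkInput[j] * checkKernel[j];
}
ARMNN_ASSERT(depthMul64Check == 0.0f);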
3579 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3582 tensorHandleFactory,
3585 std::vector<float>(),
3587 inputTensorInfo.GetShape(),
3588 kernelTensorInfo.GetShape(),
3589 outputTensorInfo.GetShape(),
3596 armnn::IWorkloadFactory& workloadFactory,
3597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3598 const armnn::ITensorHandleFactory& tensorHandleFactory,
3602 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3603 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3607 armnn::IWorkloadFactory& workloadFactory,
3608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3609 const armnn::ITensorHandleFactory& tensorHandleFactory,
3613 return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3614 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3618 armnn::IWorkloadFactory& workloadFactory,
3619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3620 const armnn::ITensorHandleFactory& tensorHandleFactory,
3624 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3625 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3629 armnn::IWorkloadFactory& workloadFactory,
3630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3631 const armnn::ITensorHandleFactory& tensorHandleFactory)
3633 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3636 tensorHandleFactory,
3643 armnn::IWorkloadFactory& workloadFactory,
3644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3645 const armnn::ITensorHandleFactory& tensorHandleFactory,
3649 return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3650 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3654 armnn::IWorkloadFactory& workloadFactory,
3655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3656 const armnn::ITensorHandleFactory& tensorHandleFactory,
3660 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3661 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3665 armnn::IWorkloadFactory& workloadFactory,
3666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3667 const armnn::ITensorHandleFactory& tensorHandleFactory,
3670 using namespace armnn;
3672 const DataType inputType = DataType::QAsymmU8;
3673 const DataType kernelType = DataType::QSymmS8;
3674 const DataType biasType = DataType::Signed32;
3676 TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128);
3677 TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128);
3679 const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
3680 const unsigned int quantDimension = 3;
3681 TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension);
3683 const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3684 constexpr unsigned int biasQuantDimension = 0;
3685 TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
3687 std::vector<uint8_t> inputData =
3700 std::vector<int8_t> kernelData =
3708 std::vector<int32_t> biasData =
3713 std::vector<uint8_t> expectedOutputData =
3727 std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3731 descriptor.m_StrideY = 1;
3732 descriptor.m_PadLeft = 0;
3733 descriptor.m_PadRight = 0;
3734 descriptor.m_PadTop = 0;
3735 descriptor.m_PadBottom = 0;
3736 descriptor.m_DilationX = 1;
3737 descriptor.m_DilationY = 1;
3738 descriptor.m_BiasEnabled = true;
3739 descriptor.m_DataLayout = layout;
3741 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
3742 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
3753 queueDescriptor.m_Weight = &weightTensor;
3754 queueDescriptor.m_Bias = &biasTensor;
3756 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3757 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3762 inputHandle->Allocate();
3763 outputHandle->Allocate();
3767 ExecuteWorkload(*workload, memoryManager);
3775 outputHandle->GetShape(),
3776 outputInfo.GetShape());
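The depthwise per-axis test above follows the same pattern, but its kernel of shape { 1, 2, 2, 4 } is quantized along dimension 3 (one scale per output channel), while the bias is still quantized along dimension 0. A minimal sketch, assuming the same biasScale = inputScale * kernelScale relationship as in the Convolution2d per-axis test (the check function is illustrative only):

    #include <cassert>
    #include <cmath>
    #include <vector>

    // Verify that each bias scale is the product of the input scale and the matching kernel scale.
    void CheckDepthwiseBiasScales()
    {
        const float inputScale = 0.5f;
        const std::vector<float> kernelScales { 1.0f, 0.5f,  1.0f, 0.5f  };
        const std::vector<float> biasScales   { 0.5f, 0.25f, 0.5f, 0.25f };
        for (std::size_t c = 0; c < kernelScales.size(); ++c)
        {
            assert(std::fabs(biasScales[c] - inputScale * kernelScales[c]) < 1e-6f);
        }
    }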
3780 armnn::IWorkloadFactory& workloadFactory,
3781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3782 armnn::IWorkloadFactory& refWorkloadFactory,
3783 const armnn::ITensorHandleFactory& tensorHandleFactory,
3784 const armnn::ITensorHandleFactory& refTensorHandleFactory,
3787 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
3788 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
3792 armnn::IWorkloadFactory& workloadFactory,
3793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3794 armnn::IWorkloadFactory& refWorkloadFactory,
3795 const armnn::ITensorHandleFactory& tensorHandleFactory,
3796 const armnn::ITensorHandleFactory& refTensorHandleFactory,
3799 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
3800 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);