22 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
33 const std::vector<T>& inputData,
34 const std::vector<T>& outputExpectedData,
35 unsigned int inputWidth,
36 unsigned int inputHeight,
37 unsigned int inputChannels,
38 unsigned int inputBatchSize)
41 unsigned int outputWidth = inputWidth;
42 unsigned int outputHeight = inputHeight;
43 unsigned int outputChannels = inputChannels;
44 unsigned int outputBatchSize = inputBatchSize;
46 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
48 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
50 if(armnn::IsQuantizedType<T>())
53 inputTensorInfo.SetQuantizationOffset(inputOffset);
55 outputTensorInfo.SetQuantizationScale(outputScale);
56 outputTensorInfo.SetQuantizationOffset(outputOffset);
59 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
61 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
62 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
67 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
68 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
75 descriptor, workloadInfo);
77 inputHandle->Allocate();
78 outputHandle->Allocate();
88 outputHandle->GetShape(),
89 outputTensorInfo.GetShape());
97 unsigned int inputWidth = 4u;
98 unsigned int inputHeight = 5u;
99 unsigned int inputChannels = 1u;
100 unsigned int inputBatchSize = 1;
102 std::vector<float> input = std::vector<float>{
103 -2.0f, 0.1f, 0.5f, 1.25f,
104 0.786f, 0.9875f, -1.5f, 0.384f,
105 1.0001f, 3.5f, 7.5f, 0.896f,
106 2.126f, 2.0f, 0.3f, 0.15f,
107 0.999f, 1.2f, 0.89f, 6.1f,
111 std::vector<float> output = std::vector<float>{
112 -1.0f, 0.1f, 0.5f, 1.0f,
113 0.786f, 0.9875f, -1.0f, 0.384f,
114 1.0f, 1.0f, 1.0f, 0.896f,
115 1.0f, 1.0f, 0.3f, 0.15f,
116 0.999f, 1.0f, 0.89f, 1.0f,
119 return BoundedReLuTestCommon<armnn::DataType::Float32>(
120 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
121 inputWidth, inputHeight, inputChannels, inputBatchSize);
129 unsigned int inputWidth = 4u;
130 unsigned int inputHeight = 5u;
131 unsigned int inputChannels = 1u;
132 unsigned int inputBatchSize = 1;
134 std::vector<float> input = std::vector<float>{
135 -1.0f, 0.1f, 0.5f, 6.25f,
136 0.786f, 5.9875f, -0.5f, 0.384f,
137 6.0001f, 3.5f, 7.5f, 0.896f,
138 2.126f, 12.0f, 0.3f, 0.15f,
139 0.999f, 1.2f, 0.89f, 6.1f,
143 std::vector<float> output = std::vector<float>{
144 0.0f, 0.1f, 0.5f, 6.0f,
145 0.786f, 5.9875f, 0.0f, 0.384f,
146 6.0f, 3.5f, 6.0f, 0.896f,
147 2.126f, 6.0f, 0.3f, 0.15f,
148 0.999f, 1.2f, 0.89f, 6.0f,
151 return BoundedReLuTestCommon<armnn::DataType::Float32>(
152 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
153 inputWidth, inputHeight, inputChannels, inputBatchSize);
161 unsigned int inputWidth = 3u;
162 unsigned int inputHeight = 2u;
163 unsigned int inputChannels = 1u;
164 unsigned int inputBatchSize = 1;
166 std::vector<uint8_t> input = std::vector<uint8_t>{
172 std::vector<uint8_t> output = std::vector<uint8_t>{
177 float inputScale = 12.0f / 255.0f;
178 int32_t inputOffset = 63;
179 float outputScale = 6.0f / 255.0f;
180 int32_t outputOffset = 0;
182 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
183 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
184 inputScale, inputOffset, outputScale, outputOffset,
185 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
193 unsigned int inputWidth = 3u;
194 unsigned int inputHeight = 2u;
195 unsigned int inputChannels = 1u;
196 unsigned int inputBatchSize = 1;
198 std::vector<uint8_t> input = std::vector<uint8_t>{
204 std::vector<uint8_t> output = std::vector<uint8_t>{
209 int32_t inputOffset = 112;
210 float inputScale = 0.0125f;
212 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
213 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
214 inputScale, inputOffset, inputScale, inputOffset,
215 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
221 struct BoundedReLuRandomInputTestTraits
223 constexpr
static unsigned int inputHeight = 31u;
224 constexpr
static unsigned int inputWidth = 19u;
225 constexpr
static unsigned int inputChannels = 4u;
226 constexpr
static unsigned int inputBatchSize = 2;
228 constexpr
static unsigned int outputHeight = inputHeight;
229 constexpr
static unsigned int outputWidth = inputWidth;
230 constexpr
static unsigned int outputChannels = inputChannels;
231 constexpr
static unsigned int outputBatchSize = inputBatchSize;
235 return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
241 return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
246 std::vector<float> BoundedReLuRandomInputTest(
255 const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
256 const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
260 std::vector<float> input = MakeRandomTensor<float>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
261 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
263 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
264 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
269 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
270 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
274 descriptor, workloadInfo);
276 inputHandle->Allocate();
277 outputHandle->Allocate();
303 activationDescriptor.
m_A = upperBound;
304 activationDescriptor.
m_B = lowerBound;
307 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
309 refWorkloadFactory,
nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
314 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
323 unsigned int inputHeight = 20;
324 unsigned int inputWidth = 17;
325 unsigned int inputChannels = 3;
326 unsigned int batchSize = 5;
331 unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
337 if(armnn::IsQuantizedType<T>())
345 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
346 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
351 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
352 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
360 inputHandle->Allocate();
361 outputHandle->Allocate();
363 std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 7123561);
375 outputHandle->GetShape(),
384 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
386 tensorHandleFactory);
394 return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
395 workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
403 return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
404 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
407 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
413 float activationParameterA,
414 float activationParameterB,
417 const std::vector<float>& inputData,
420 const std::vector<float>& outputExpectedData)
423 constexpr
static unsigned int inputWidth = 16u;
424 constexpr
static unsigned int inputHeight = 1u;
425 constexpr
static unsigned int inputChannels = 1u;
426 constexpr
static unsigned int inputBatchSize = 1u;
428 constexpr
static unsigned int outputWidth = inputWidth;
429 constexpr
static unsigned int outputHeight = inputHeight;
430 constexpr
static unsigned int outputChannels = inputChannels;
431 constexpr
static unsigned int outputBatchSize = inputBatchSize;
433 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
434 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
437 if(armnn::IsQuantizedType<T>())
440 inputTensorInfo.SetQuantizationOffset(offset);
441 outputTensorInfo.SetQuantizationScale(outScale);
442 outputTensorInfo.SetQuantizationOffset(outOffset);
445 std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, scale, offset);
448 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
449 std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset);
451 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
452 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
457 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
458 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
465 descriptor, workloadInfo);
467 inputHandle->Allocate();
468 outputHandle->Allocate();
478 outputHandle->GetShape(),
479 outputTensorInfo.GetShape());
482 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
490 std::vector<float> inputData =
492 -0.1f, -0.2f, -0.3f, -0.4f,
493 0.1f, 0.2f, 0.3f, 0.4f,
494 -1.0f, -2.0f, -3.0f, -4.0f,
495 1.0f, 2.0f, 3.0f, 4.0f
499 auto f = [](
float value)
501 return 1.0f / (1.0f + std::exp(-value));
503 std::vector<float> m_OutputExpected(inputData.size());
504 std::transform(inputData.begin(), inputData.end(), m_OutputExpected.begin(), f);
506 return SimpleActivationTest<ArmnnType>(workloadFactory,
525 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
526 tensorHandleFactory, 0.0f, 0);
534 return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
535 tensorHandleFactory, 0.1f, 50);
543 return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
544 tensorHandleFactory, 0.1f, 0);
547 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
555 std::vector<float> inputData = {
556 -0.1f, -0.2f, -0.3f, -0.4f,
557 0.1f, 0.2f, 0.3f, 0.4f,
558 -1.0f, -2.0f, -3.0f, -4.0f,
559 1.0f, 2.0f, 3.0f, 4.0f
563 auto f = [](
float value)
565 return std::fmax(0.0f, value);
567 std::vector<float> outputExpected(inputData.size());
568 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
570 return SimpleActivationTest<ArmnnType>(workloadFactory,
589 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
598 return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
606 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
610 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
618 std::vector<float> inputData = {
619 -0.1f, -0.2f, -0.3f, -0.4f,
620 0.1f, 0.2f, 0.3f, 0.4f,
621 -1.0f, -2.0f, -3.0f, -4.0f,
622 1.0f, 2.0f, 3.0f, 4.0f
624 const float a = 1.0f;
625 const float b = -1.0f;
627 auto f = [a, b](
float value)
629 return std::min(a, std::max(b, value));
631 std::vector<float> outputExpected(inputData.size());
632 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
634 return SimpleActivationTest<ArmnnType>(workloadFactory,
653 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
658 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
666 std::vector<float> inputData = {
667 -0.1f, -0.2f, -0.3f, -0.4f,
668 0.1f, 0.2f, 0.3f, 0.4f,
669 -1.0f, -2.0f, -3.0f, -4.0f,
670 1.0f, 2.0f, 3.0f, 4.0f
674 auto f = [](
float value)
676 return std::log(1.0f + std::exp(value));
678 std::vector<float> outputExpected(inputData.size());
679 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
681 return SimpleActivationTest<ArmnnType>(workloadFactory,
700 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
708 return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
709 tensorHandleFactory, 0.0625f, 64);
717 return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
720 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
728 std::vector<float> inputData = {
729 -0.1f, -0.2f, -0.3f, -0.4f,
730 0.1f, 0.2f, 0.3f, 0.4f,
731 -1.0f, -2.0f, -3.0f, -4.0f,
732 1.0f, 2.0f, 3.0f, 4.0f
735 const float a = 0.01f;
737 auto f = [a](
float value)
739 return value > 0.0f ? value : (value * a);
741 std::vector<float> outputExpected(inputData.size());
742 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
744 return SimpleActivationTest<ArmnnType>(workloadFactory,
763 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
771 return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
772 tensorHandleFactory, 0.0625f, 64);
780 return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
783 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
791 std::vector<float> inputData = {
792 -0.1f, -0.2f, -0.3f, -0.4f,
793 0.1f, 0.2f, 0.3f, 0.4f,
794 -1.0f, -2.0f, -3.0f, -4.0f,
795 1.0f, 2.0f, 3.0f, 4.0f
799 auto f = [](
float value)
801 return std::abs(value);
803 std::vector<float> outputExpected(inputData.size());
804 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
806 return SimpleActivationTest<ArmnnType>(workloadFactory,
825 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
833 return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
841 return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
850 const int inputDataSize = 120;
851 std::vector<float> inputData(inputDataSize);
853 for (
unsigned int i = 0u; i < inputDataSize; ++i)
855 inputData[i] =
static_cast<float>(i) / 10;
858 auto f = [](
float value)
860 return std::sqrt(value);
862 std::vector<float> expectedOutput(inputDataSize);
863 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
870 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
872 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
873 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
877 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
878 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
883 descriptor, workloadInfo);
885 inputHandle->Allocate();
886 outputHandle->Allocate();
896 outputHandle->GetShape(),
897 outputTensorInfo.GetShape());
900 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
908 std::vector<float> inputData = {
909 0.1f, 0.2f, 0.3f, 0.4f,
910 0.1f, 0.2f, 0.3f, 0.4f,
911 1.0f, 2.0f, 3.0f, 4.0f,
912 1.0f, 2.0f, 3.0f, 4.0f
916 auto f = [](
float value)
918 return std::sqrt(value);
920 std::vector<float> expectedOutput(inputData.size());
921 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
923 return SimpleActivationTest<ArmnnType>(workloadFactory,
942 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
950 return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
958 return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
961 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
969 std::vector<float> inputData = {
970 -0.1f, -0.2f, -0.3f, -0.4f,
971 0.1f, 0.2f, 0.3f, 0.4f,
972 -1.0f, -2.0f, -3.0f, -4.0f,
973 1.0f, 2.0f, 3.0f, 4.0f
977 auto f = [](
float value)
979 return std::pow(value,2);
981 std::vector<float> expectedOutput(inputData.size());
982 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
984 return SimpleActivationTest<ArmnnType>(workloadFactory,
1003 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1011 return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1012 tensorHandleFactory, 0.0625f, 64);
1020 return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1023 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1031 std::vector<float> inputData = {
1032 -0.1f, -0.2f, -0.3f, -0.4f,
1033 0.1f, 0.2f, 0.3f, 0.4f,
1034 -1.0f, -2.0f, -3.0f, -4.0f,
1035 1.0f, 2.0f, 3.0f, 4.0f
1038 const float a = 2.0f;
1039 const float b = 3.0f;
1041 auto f = [a, b](
float value)
1043 return a * tanhf(b * value);
1045 std::vector<float> expectedOutput(inputData.size());
1046 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1048 return SimpleActivationTest<ArmnnType>(workloadFactory,
1050 tensorHandleFactory,
1067 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1075 return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1083 return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1087 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1095 std::vector<float> inputData = {
1096 -0.1f, -0.2f, -0.3f, -0.4f,
1097 0.1f, 0.2f, 0.3f, 0.4f,
1098 -1.0f, -2.0f, -3.0f, -4.0f,
1099 1.0f, 2.0f, 3.0f, 4.0f
1103 const float a = 0.01f;
1105 auto f = [a](
float value)
1107 return (value >= 0) ? value : a * (expf(value) - 1);
1109 std::vector<float> expectedOutput(inputData.size());
1110 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1112 return SimpleActivationTest<ArmnnType>(workloadFactory,
1114 tensorHandleFactory,
1131 return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1139 return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1147 return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1151 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1159 std::vector<float> inputData = {
1160 -0.1f, -0.2f, -0.3f, -0.4f,
1161 0.1f, 0.2f, 0.3f, 0.4f,
1162 -1.0f, -2.0f, -3.0f, -4.0f,
1163 1.0f, 2.0f, 3.0f, 4.0f
1166 auto f = [](
float x)
1171 float reLu6_step1 = std::max((x + 3),0.0f);
1172 float reLu6Complete = std::min(reLu6_step1, 6.0f);
1173 float hardSwish_step1 = x * reLu6Complete;
1174 float result = hardSwish_step1 / 6;
1177 std::vector<float> expectedOutput(inputData.size());
1178 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1180 return SimpleActivationTest<ArmnnType>(workloadFactory,
1182 tensorHandleFactory,
1199 return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1207 return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1208 tensorHandleFactory, 0.1f, 64);
1216 return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1220 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1228 unsigned int batchSize = 5,
1229 float qScale = 0.0f,
1230 int32_t qOffset = 0)
1233 unsigned int width = 17;
1234 unsigned int height = 29;
1235 unsigned int channels = 2;
1243 unsigned int shape[] = {batchSize, channels, height, width};
1249 if(armnn::IsQuantizedType<T>())
1257 float minVal = -10.f;
1263 std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 21453, minVal, 10.f);
1265 std::vector<T> expectedOutput(outputTensorInfo.
GetNumElements());
1267 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1268 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1270 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1271 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1275 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1276 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1283 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1284 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1293 inputHandle->Allocate();
1294 outputHandle->Allocate();
1295 inputHandleRef->Allocate();
1296 outputHandleRef->Allocate();
1301 workload->Execute();
1302 workloadRef->Execute();
1309 outputHandle->GetShape(),
1321 unsigned int batchSize)
1323 return CompareActivationTestImpl<armnn::DataType::Float32>(
1324 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1325 refTensorHandleFactory, f, batchSize);
1336 return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1337 workloadFactory, memoryManager, refWorkloadFactory,
1338 tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1349 return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1350 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1351 refTensorHandleFactory, f, 5, 0.1f, 0);
LayerTestResult< T, 4 > SimpleSigmoidTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > SqrtTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleSigmoidTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SoftReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
LayerTestResult< float, 4 > CompareBoundedReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, float upperBound, float lowerBound)
LayerTestResult< T, 4 > TanhTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< float, 4 > ConstantLinearActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > TanhUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > SoftReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerDescriptor m_Parameters
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 5 > SqrtNNTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > TanhTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > TanhInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SqrtInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > CompareActivationTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SquareTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > AbsTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > EluUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< T > m_ExpectedData
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
LayerTestResult< T, 4 > SimpleActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
LayerTestResult< float, 4 > EluTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
#define ARMNN_ASSERT(COND)
LayerTestResult< int16_t, 4 > CompareActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< uint8_t, 4 > ReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SquareUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
An ActivationDescriptor for the ActivationLayer.
min(a, max(b, input)) ReLu1 & ReLu6.
LayerTestResult< float, 4 > LeakyReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > SqrtTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > SoftReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
std::vector< T > m_ActualData
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > LeakyReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< T, 4 > HardSwishTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > EluInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > EluTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > SqrtUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SquareInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< int16_t, 4 > ReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > ReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
Contains information about the TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > AbsTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > BoundedReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
LayerTestResult< float, 4 > HardSwishTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > HardSwishUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AbsInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< T, 4 > SquareTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
float m_B
Beta (lower-bound) parameter used by the activation functions (BoundedReLu, Linear, TanH).
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
LayerTestResult< uint8_t, 4 > AbsUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > CompareActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize)
unsigned int GetNumElements() const
LayerTestResult< int16_t, 4 > HardSwishInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)