//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "ActivationFixture.hpp"
#include "QuantizeHelper.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>
#include <cmath>

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    unsigned int outputWidth     = inputWidth;
    unsigned int outputHeight    = inputHeight;
    unsigned int outputChannels  = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}
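// Note: for BoundedReLu the descriptor's m_A parameter carries the upper
// bound and m_B the lower bound, i.e. output = min(m_A, max(m_B, input)),
// as the clamp lambda in BoundedReLuTestCommon further down also shows.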
LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth     = 4u;
    unsigned int inputHeight    = 5u;
    unsigned int inputChannels  = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -2.0f,    0.1f,     0.5f,  1.25f,
        0.786f,   0.9875f, -1.5f,  0.384f,
        1.0001f,  3.5f,     7.5f,  0.896f,
        2.126f,   2.0f,     0.3f,  0.15f,
        0.999f,   1.2f,     0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        -1.0f,    0.1f,     0.5f,  1.0f,
        0.786f,   0.9875f, -1.0f,  0.384f,
        1.0f,     1.0f,     1.0f,  0.896f,
        1.0f,     1.0f,     0.3f,  0.15f,
        0.999f,   1.0f,     0.89f, 1.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth     = 4u;
    unsigned int inputHeight    = 5u;
    unsigned int inputChannels  = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -1.0f,    0.1f,     0.5f,  6.25f,
        0.786f,   5.9875f, -0.5f,  0.384f,
        6.0001f,  3.5f,     7.5f,  0.896f,
        2.126f,   12.0f,    0.3f,  0.15f,
        0.999f,   1.2f,     0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        0.0f,     0.1f,     0.5f,  6.0f,
        0.786f,   5.9875f,  0.0f,  0.384f,
        6.0f,     3.5f,     6.0f,  0.896f,
        2.126f,   6.0f,     0.3f,  0.15f,
        0.999f,   1.2f,     0.89f, 6.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth     = 3u;
    unsigned int inputHeight    = 2u;
    unsigned int inputChannels  = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 124, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        0, 122, 0,
        255, 0, 58
    };

    float inputScale     = 12.0f / 255.0f;
    int32_t inputOffset  = 63;
    float outputScale    = 6.0f / 255.0f;
    int32_t outputOffset = 0;

    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 6.0f, 0.0f,
        inputScale, inputOffset, outputScale, outputOffset,
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth     = 3u;
    unsigned int inputHeight    = 2u;
    unsigned int inputChannels  = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 230, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        51, 192, 32,
        192, 32, 92
    };

    int32_t inputOffset = 112;
    float inputScale    = 0.0125f;

    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 1.0f, -1.0f,
        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}
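// Worked example for the "calculated manually" uint8 values above: a
// quantized value q represents the real value scale * (q - offset). In the
// upper-bound-only test, q = 124 with inputScale = 12/255 and
// inputOffset = 63 dequantizes to (124 - 63) * 12/255 ≈ 2.87, which lies
// inside [0, 6] and so requantizes with outputScale = 6/255 and
// outputOffset = 0 to round(2.87 * 255/6) = 122, the expected output.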
namespace
{

struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight    = 31u;
    constexpr static unsigned int inputWidth     = 19u;
    constexpr static unsigned int inputChannels  = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    constexpr static unsigned int outputHeight    = inputHeight;
    constexpr static unsigned int outputWidth     = inputWidth;
    constexpr static unsigned int outputChannels  = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
            armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
            armnn::DataType::Float32);
    }
};

boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound].
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}

} // namespace
LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputHeight   = 20;
    unsigned int inputWidth    = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize     = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}
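// Linear activation computes output = m_A * input + m_B, so the
// m_A = 1.0f, m_B = 0.0f parameters above make it the identity function
// and the tensor should pass through unchanged.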
LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 4.0f, 3);
}

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float qScale,
    int32_t qOffset,
    const std::vector<float>& inputData,
    const std::vector<float>& outputExpectedData)
{
    constexpr static unsigned int inputWidth     = 16u;
    constexpr static unsigned int inputHeight    = 1u;
    constexpr static unsigned int inputChannels  = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    constexpr static unsigned int outputWidth     = inputWidth;
    constexpr static unsigned int outputHeight    = inputHeight;
    constexpr static unsigned int outputChannels  = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation descriptor.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Calculated manually.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, outputExpectedData));

    return result;
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sigmoid,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
}

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
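// Worked example for the quantized sigmoid variants: with qScale = 0.1f and
// qOffset = 50 (the uint8 case), an expected output of f(0.1) ≈ 0.525 is
// quantized by QuantizedVector to round(0.525 / 0.1) + 50 = 55.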
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::fmax(0.0f, value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::ReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> ReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    const float a = 1.0f;
    const float b = -1.0f;

    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return std::min(a, std::max(b, value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::BoundedReLu,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return BoundedReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SoftReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::log(1.0f + std::exp(value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::SoftReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> SoftReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    const float a = 0.01f;

    // Calculate output values for input.
    auto f = [a](float value)
    {
        return value > 0.0f ? value : (value * a);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::LeakyReLu,
                                           a,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AbsTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::abs(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Abs,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> AbsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SqrtTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        0.1f, 0.2f, 0.3f, 0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        1.0f, 2.0f, 3.0f, 4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sqrt,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> SqrtInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}
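// Unlike the other tests, SqrtTestCommon uses only non-negative inputs,
// since the square root of a negative value is NaN and could not be
// compared against an expected output. CompareActivationTestImpl below
// applies the same restriction by raising minVal to zero for Sqrt.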
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SquareTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::pow(value, 2);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Square,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> SquareInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TanhTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    const float a = 2.0f;
    const float b = 3.0f;

    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return a * tanhf(b * value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::TanH,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> TanhInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int width    = 17;
    unsigned int height   = 29;
    unsigned int channels = 2;

    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    float minVal = -10.f;
    if (f == armnn::ActivationFunction::Sqrt)
    {
        minVal = 0.f;
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    auto boostArrayExtents = boost::extents
        [boost::numeric_cast<boost::multi_array_types::size_type>(batchSize)]
        [boost::numeric_cast<boost::multi_array_types::size_type>(channels)]
        [boost::numeric_cast<boost::multi_array_types::size_type>(height)]
        [boost::numeric_cast<boost::multi_array_types::size_type>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    BOOST_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    BOOST_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
LayerTestResult<float, 4> CompareActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize)
{
    return CompareActivationTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
}

LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}

LayerTestResult<int16_t, 4> CompareActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
}
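// Illustrative sketch (not part of the test helpers above): one way a
// backend's unit tests might drive these functions, assuming the reference
// backend's RefWorkloadFactory and the CompareTensors helper from
// test/TensorHelpers.hpp are available in the including translation unit.
//
// BOOST_AUTO_TEST_CASE(SimpleSigmoidFloat32)
// {
//     armnn::RefWorkloadFactory workloadFactory; // backend under test
//     LayerTestResult<float, 4> result = SimpleSigmoidTest(workloadFactory, nullptr);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));
// }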