From 03fbeaf532f2575381edc2336f834973117f6e0f Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Wed, 26 Feb 2020 15:39:23 +0000
Subject: IVGCVSW-4440 : Add HARD_SWISH Support to Activation in CpuRef

* Add a new Activation type of HardSwish.
* Add CpuRef support and tests.

Signed-off-by: Colm Donelan
Change-Id: I68c3840aa45b7a27d5e416a5d50fe8f99f003ce8
---
 src/armnn/test/QuantizerTest.cpp                 | 55 +++++++++++++++++++
 src/armnnDeserializer/Deserializer.cpp           |  2 +
 src/armnnSerializer/ArmnnSchema.fbs              |  3 +-
 src/armnnSerializer/Serializer.cpp               |  2 +
 .../test/layerTests/ActivationTestImpl.cpp       | 63 ++++++++++++++++++++++
 .../test/layerTests/ActivationTestImpl.hpp       | 16 ++++++
 src/backends/reference/RefLayerSupport.cpp       |  1 +
 src/backends/reference/test/RefLayerTests.cpp    |  4 ++
 src/backends/reference/workloads/Activation.cpp  |  8 +++
 9 files changed, 153 insertions(+), 1 deletion(-)

diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 6d5d212fc9..2dc054af07 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -762,6 +762,61 @@ BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
     TestEluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
 }
+BOOST_AUTO_TEST_CASE(QuantizeHardSwishActivation)
+{
+    class TestHardSwishActivationQuantization : public TestQuantization
+    {
+    public:
+        TestHardSwishActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+        : TestQuantization(inputShape, outputShape) {}
+
+        TestHardSwishActivationQuantization(const QuantizerOptions& options,
+                                            const TensorShape& inputShape,
+                                            const TensorShape& outputShape)
+        : TestQuantization(options, inputShape, outputShape) {}
+
+        void VisitActivationLayer(const IConnectableLayer* layer,
+                                  const ActivationDescriptor& descriptor,
+                                  const char* name = nullptr) override
+        {
+            boost::ignore_unused(descriptor, name);
+            TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+            // Based off default static range [-15.0f, 15.0f]
+            TestQuantizationParams(
+                info, {30.0f / g_AsymmU8QuantizationBase, 128},
+                      {30.0f / g_AsymmS8QuantizationBase, 0},
+                      {15.0f / g_SymmS8QuantizationBase,  0},
+                      {15.0f / g_SymmS16QuantizationBase, 0});
+        }
+    };
+
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::HardSwish;
+
+    const TensorShape shape{1U};
+    INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
+
+    INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+    TestHardSwishActivationQuantization validatorQAsymmU8(shape, shape);
+    VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
+
+    const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
+    INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
+    TestHardSwishActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
+
+    const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
+    INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
+    TestHardSwishActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
+
+    const QuantizerOptions qSymmS16options(DataType::QSymmS16);
+    INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
+    TestHardSwishActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
+    VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+}
+
 BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
 {
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 0d81649115..ed4605b2af 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -427,6 +427,8 @@ armnn::ActivationFunction ToActivationFunction(armnnSerializer::ActivationFuncti
             return armnn::ActivationFunction::Square;
         case armnnSerializer::ActivationFunction_Elu:
             return armnn::ActivationFunction::Elu;
+        case armnnSerializer::ActivationFunction_HardSwish:
+            return armnn::ActivationFunction::HardSwish;
         default:
             return armnn::ActivationFunction::Sigmoid;
     }
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index d175d41f3f..d7565a5b9a 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -20,7 +20,8 @@ enum ActivationFunction : byte {
     Abs = 7,
     Sqrt = 8,
     Square = 9,
-    Elu = 10
+    Elu = 10,
+    HardSwish = 11
 }
 
 enum ArgMinMaxFunction : byte {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index a3fdcf8123..39df0c2a7f 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -48,6 +48,8 @@ serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::Activation
             return serializer::ActivationFunction::ActivationFunction_Square;
         case armnn::ActivationFunction::Elu:
             return serializer::ActivationFunction::ActivationFunction_Elu;
+        case armnn::ActivationFunction::HardSwish:
+            return serializer::ActivationFunction::ActivationFunction_HardSwish;
         default:
             return serializer::ActivationFunction::ActivationFunction_Sigmoid;
     }
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 1b6e782060..6993b9e9b1 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1075,6 +1075,69 @@ LayerTestResult<int16_t, 4> EluInt16Test(
 }
 
 
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> HardSwishTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+        0.1f, 0.2f, 0.3f, 0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+        1.0f, 2.0f, 3.0f, 4.0f
+    };
+    // Calculate output values for input.
+    auto f = [](float x)
+    {
+        // Break down the calculation to help with verification.
+        // hard_swish(x) = x * relu6(x+3) / 6
+        // relu6(x) = min(max(x,0),6)
+        float reLu6_step1 = std::max((x + 3), 0.0f);
+        float reLu6Complete = std::min(reLu6_step1, 6.0f);
+        float hardSwish_step1 = x * reLu6Complete;
+        float result = hardSwish_step1 / 6;
+        return result;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    return SimpleActivationTest<ArmnnType>(workloadFactory,
+                                           memoryManager,
+                                           armnn::ActivationFunction::HardSwish,
+                                           0.f,
+                                           0.f,
+                                           qScale,
+                                           qOffset,
+                                           inputData,
+                                           qScale,
+                                           qOffset,
+                                           outputExpectedData);
+}
+
+LayerTestResult<float, 4> HardSwishTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+LayerTestResult<uint8_t, 4> HardSwishUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
+}
+
+LayerTestResult<int16_t, 4> HardSwishInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> CompareActivationTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp
index 28301188d5..2bd517180f 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp
@@ -216,6 +216,22 @@ LayerTestResult<int16_t, 4> SquareInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+//
+// HardSwish
+//
+
+LayerTestResult<float, 4> HardSwishTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> HardSwishUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> HardSwishInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 //
 // Other
 //
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 25334c3b52..7d5c3b509e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -109,6 +109,7 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
         case ActivationFunction::Abs:
         case ActivationFunction::BoundedReLu:
         case ActivationFunction::Elu:
+        case ActivationFunction::HardSwish:
         case ActivationFunction::LeakyReLu:
         case ActivationFunction::Linear:
         case ActivationFunction::ReLu:
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index ed2b995bd5..40bf600331 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -466,6 +466,10 @@ ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
 ARMNN_AUTO_TEST_CASE(Elu, EluTest)
 ARMNN_AUTO_TEST_CASE(EluUint8, EluUint8Test)
 ARMNN_AUTO_TEST_CASE(EluInt16, EluInt16Test)
+// HardSwish Activation
+ARMNN_AUTO_TEST_CASE(HardSwish, HardSwishTest)
+ARMNN_AUTO_TEST_CASE(HardSwishUint8, HardSwishUint8Test)
+ARMNN_AUTO_TEST_CASE(HardSwishInt16, HardSwishInt16Test)
 
 // Fully Connected
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
diff --git a/src/backends/reference/workloads/Activation.cpp b/src/backends/reference/workloads/Activation.cpp
index 82dd919de9..798c6e48d5 100644
--- a/src/backends/reference/workloads/Activation.cpp
+++ b/src/backends/reference/workloads/Activation.cpp
@@ -9,6 +9,7 @@
 namespace armnn
 {
+
 float Activation(float in,
                  ActivationFunction function,
                  float a,
@@ -74,6 +75,13 @@ float Activation(float in,
             output = (in >= 0) ? in : a * (expf(in) - 1);
             break;
         }
+        case ActivationFunction::HardSwish:
+        {
+            // hard_swish(x) = x * relu6(x+3) / 6
+            // relu6(x) = min(max(x,0),6)
+            output = in * (std::min(std::max((in + 3), 0.0f), 6.0f)) / 6;
+            break;
+        }
         default:
         {
             throw InvalidArgumentException("Unsupported activation function");
--
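
Reviewer note (not part of the patch): both HardSwishTestCommon and the CpuRef Activation workload evaluate hard_swish(x) = x * min(max(x + 3, 0), 6) / 6. The minimal standalone sketch below can be used to sanity-check that formula against a few hand-worked values; the helper name HardSwishReference and the chosen sample points are illustrative only and do not appear in the change.

// Illustrative check of the hard_swish formula used above (assumed sample values).
#include <algorithm>
#include <cassert>
#include <cmath>

float HardSwishReference(float x)
{
    // relu6(x + 3) = min(max(x + 3, 0), 6), then multiply by x and divide by 6.
    return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
}

int main()
{
    // Below -3 the relu6 term is 0, so the output saturates at 0: hard_swish(-4) = 0.
    assert(HardSwishReference(-4.0f) == 0.0f);
    // Above +3 the relu6 term is 6, so the function reduces to the identity: hard_swish(4) = 4.
    assert(HardSwishReference(4.0f) == 4.0f);
    // Interior point: hard_swish(1) = 1 * min(max(4, 0), 6) / 6 = 4 / 6.
    assert(std::fabs(HardSwishReference(1.0f) - (4.0f / 6.0f)) < 1e-6f);
    return 0;
}

The single-expression form in Activation.cpp and the step-by-step lambda in HardSwishTestCommon are the same calculation, so the test exercises the CpuRef implementation against an independently derived expectation.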