author     Colm Donelan <Colm.Donelan@arm.com>    2020-02-26 15:39:23 +0000
committer  Colm Donelan <colm.donelan@arm.com>    2020-03-02 21:31:49 +0000
commit     03fbeaf532f2575381edc2336f834973117f6e0f (patch)
tree       97585ec459e4069853c16468ca82513f0d899200 /src
parent     c9ea45adefdde2890e9aa191a5b31563a3dd35ea (diff)
IVGCVSW-4440 : Add HARD_SWISH Support to Activation in CpuRef
* Add a new Activation type of HardSwish.
* Add CpuRef support and tests.

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I68c3840aa45b7a27d5e416a5d50fe8f99f003ce8
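For reference, the activation this patch adds computes hard_swish(x) = x * relu6(x + 3) / 6, with relu6(x) = min(max(x, 0), 6), as implemented in the reference workload below. A minimal standalone sketch of that formula (the function name and spot-check values here are illustrative, not part of the patch):

#include <algorithm>
#include <cstdio>

// Reference hard_swish, matching the formula used by this patch:
// hard_swish(x) = x * relu6(x + 3) / 6, where relu6(x) = min(max(x, 0), 6).
float HardSwishRef(float x)
{
    float relu6 = std::min(std::max(x + 3.0f, 0.0f), 6.0f);
    return x * relu6 / 6.0f;
}

int main()
{
    // Spot checks: large negatives saturate to 0, large positives pass through.
    std::printf("%f\n", HardSwishRef(-4.0f)); //  0.000000
    std::printf("%f\n", HardSwishRef(-1.0f)); // -1 * 2 / 6 = -0.333333
    std::printf("%f\n", HardSwishRef( 1.0f)); //  1 * 4 / 6 =  0.666667
    std::printf("%f\n", HardSwishRef( 4.0f)); //  4 * 6 / 6 =  4.000000
    return 0;
}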
Diffstat (limited to 'src')
-rw-r--r--  src/armnn/test/QuantizerTest.cpp                                    | 55
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp                              |  2
-rw-r--r--  src/armnnSerializer/ArmnnSchema.fbs                                 |  3
-rw-r--r--  src/armnnSerializer/Serializer.cpp                                  |  2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp | 63
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp | 16
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                          |  1
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                       |  4
-rw-r--r--  src/backends/reference/workloads/Activation.cpp                     |  8
9 files changed, 153 insertions(+), 1 deletion(-)
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 6d5d212fc9..2dc054af07 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -762,6 +762,61 @@ BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
TestEluActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
}
+BOOST_AUTO_TEST_CASE(QuantizeHardSwishActivation)
+{
+ class TestHardSwishActivationQuantization : public TestQuantization
+ {
+ public:
+ TestHardSwishActivationQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+ : TestQuantization(inputShape, outputShape) {}
+
+ TestHardSwishActivationQuantization(const QuantizerOptions& options,
+ const TensorShape& inputShape,
+ const TensorShape& outputShape)
+ : TestQuantization(options, inputShape, outputShape) {}
+
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr) override
+ {
+ boost::ignore_unused(descriptor, name);
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+
+ // Based off default static range [-15.0f, 15.0f]
+ TestQuantizationParams(
+ info, {30.0f / g_AsymmU8QuantizationBase, 128},
+ {30.0f / g_AsymmS8QuantizationBase, 0},
+ {15.0f / g_SymmS8QuantizationBase, 0},
+ {15.0f / g_SymmS16QuantizationBase, 0});
+ }
+ };
+
+ ActivationDescriptor descriptor;
+ descriptor.m_Function = ActivationFunction::HardSwish;
+
+ const TensorShape shape{1U};
+ INetworkPtr network = CreateNetworkWithActivationLayer(descriptor, shape);
+
+ INetworkPtr quantizedNetworkQAsymmU8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestHardSwishActivationQuantization validatorQAsymmU8(shape, shape);
+ VisitLayersTopologically(quantizedNetworkQAsymmU8.get(), validatorQAsymmU8);
+
+ const QuantizerOptions qAsymmS8Options(DataType::QAsymmS8);
+ INetworkPtr quantizedNetworkQAsymmS8 = INetworkQuantizer::Create(network.get(), qAsymmS8Options)->ExportNetwork();
+ TestHardSwishActivationQuantization validatorQAsymmS8(qAsymmS8Options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQAsymmS8.get(), validatorQAsymmS8);
+
+ const QuantizerOptions qSymmS8Options(DataType::QSymmS8);
+ INetworkPtr quantizedNetworkQSymmS8 = INetworkQuantizer::Create(network.get(), qSymmS8Options)->ExportNetwork();
+ TestHardSwishActivationQuantization validatorQSymmS8(qSymmS8Options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQSymmS8.get(), validatorQSymmS8);
+
+ const QuantizerOptions qSymmS16options(DataType::QSymmS16);
+ INetworkPtr quantizedNetworkQSymmS16 = INetworkQuantizer::Create(network.get(), qSymmS16options)->ExportNetwork();
+ TestHardSwishActivationQuantization validatorQSymmS16(qSymmS16options, shape, shape);
+ VisitLayersTopologically(quantizedNetworkQSymmS16.get(), validatorQSymmS16);
+}
+
BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
{
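A note on the expected parameters checked above: HardSwish falls back to the default static output range [-15.0f, 15.0f], so the asymmetric data types cover the full 30.0f span with the zero point centred at 128 (unsigned) or 0 (signed), while the symmetric types only scale the 15.0f half-range with a zero point of 0. A hedged arithmetic sketch, assuming the usual quantization bases of 255 for the 8-bit asymmetric types, 127 for QSymmS8 and 32767 for QSymmS16:

// Illustrative only: the scale arithmetic behind TestQuantizationParams above,
// assuming g_AsymmU8/g_AsymmS8 bases of 255, g_SymmS8 of 127, g_SymmS16 of 32767.
float asymmU8Scale = 30.0f / 255.0f;   // full [-15, 15] span, zero point 128
float asymmS8Scale = 30.0f / 255.0f;   // full span again, zero point 0
float symmS8Scale  = 15.0f / 127.0f;   // symmetric: half-range, zero point 0
float symmS16Scale = 15.0f / 32767.0f; // symmetric 16-bit, zero point 0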
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 0d81649115..ed4605b2af 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -427,6 +427,8 @@ armnn::ActivationFunction ToActivationFunction(armnnSerializer::ActivationFuncti
return armnn::ActivationFunction::Square;
case armnnSerializer::ActivationFunction_Elu:
return armnn::ActivationFunction::Elu;
+ case armnnSerializer::ActivationFunction_HardSwish:
+ return armnn::ActivationFunction::HardSwish;
default:
return armnn::ActivationFunction::Sigmoid;
}
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index d175d41f3f..d7565a5b9a 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -20,7 +20,8 @@ enum ActivationFunction : byte {
Abs = 7,
Sqrt = 8,
Square = 9,
- Elu = 10
+ Elu = 10,
+ HardSwish = 11
}
enum ArgMinMaxFunction : byte {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index a3fdcf8123..39df0c2a7f 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -48,6 +48,8 @@ serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::Activation
return serializer::ActivationFunction::ActivationFunction_Square;
case armnn::ActivationFunction::Elu:
return serializer::ActivationFunction::ActivationFunction_Elu;
+ case armnn::ActivationFunction::HardSwish:
+ return serializer::ActivationFunction::ActivationFunction_HardSwish;
default:
return serializer::ActivationFunction::ActivationFunction_Sigmoid;
}
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 1b6e782060..6993b9e9b1 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1076,6 +1076,69 @@ LayerTestResult<int16_t, 4> EluInt16Test(
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> HardSwishTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset)
+{
+ std::vector<float> inputData = {
+ -0.1f, -0.2f, -0.3f, -0.4f,
+ 0.1f, 0.2f, 0.3f, 0.4f,
+ -1.0f, -2.0f, -3.0f, -4.0f,
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+ // Calculate output values for input.
+ auto f = [](float x)
+ {
+ // Break down the calculation to help with verification.
+ // hard_swish(x) = x * relu6(x+3) / 6
+ // relu6(x) = min(max(x,0),6)
+ float reLu6_step1 = std::max((x + 3),0.0f);
+ float reLu6Complete = std::min(reLu6_step1, 6.0f);
+ float hardSwish_step1 = x * reLu6Complete;
+ float result = hardSwish_step1 / 6;
+ return result;
+ };
+ std::vector<float> outputExpectedData(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::HardSwish,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ qScale,
+ qOffset,
+ outputExpectedData);
+}
+
+LayerTestResult<float, 4> HardSwishTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+LayerTestResult<uint8_t, 4> HardSwishUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
+}
+
+LayerTestResult<int16_t, 4> HardSwishInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
+}
+
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
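The three wrappers above exercise the common test at Float32, QAsymmU8 (scale 0.1f, offset 64) and QSymmS16 (scale 0.1f, offset 0). Under the usual affine scheme q = round(x / scale) + offset, the uint8 settings keep the whole [-4.0f, 4.0f] input range inside [0, 255]; a hedged sketch of that arithmetic (this helper is illustrative, not the library's own quantize utility):

#include <cmath>
#include <cstdint>

// Affine quantization as relied on conceptually by the uint8 test variant above.
uint8_t QuantizeU8(float x, float scale = 0.1f, int32_t offset = 64)
{
    return static_cast<uint8_t>(std::lround(x / scale) + offset);
}
// QuantizeU8(-4.0f) == 24, QuantizeU8(0.3f) == 67, QuantizeU8(4.0f) == 104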
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp
index 28301188d5..2bd517180f 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.hpp
@@ -217,6 +217,22 @@ LayerTestResult<int16_t, 4> SquareInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
//
+// HardSwish
+//
+
+LayerTestResult<float, 4> HardSwishTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> HardSwishUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> HardSwishInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+//
// Other
//
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 25334c3b52..7d5c3b509e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -109,6 +109,7 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
case ActivationFunction::Abs:
case ActivationFunction::BoundedReLu:
case ActivationFunction::Elu:
+ case ActivationFunction::HardSwish:
case ActivationFunction::LeakyReLu:
case ActivationFunction::Linear:
case ActivationFunction::ReLu:
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index ed2b995bd5..40bf600331 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -466,6 +466,10 @@ ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
ARMNN_AUTO_TEST_CASE(Elu, EluTest)
ARMNN_AUTO_TEST_CASE(EluUint8, EluUint8Test)
ARMNN_AUTO_TEST_CASE(EluInt16, EluInt16Test)
+// HardSwish Activation
+ARMNN_AUTO_TEST_CASE(HardSwish, HardSwishTest)
+ARMNN_AUTO_TEST_CASE(HardSwishUint8, HardSwishUint8Test)
+ARMNN_AUTO_TEST_CASE(HardSwishInt16, HardSwishInt16Test)
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
diff --git a/src/backends/reference/workloads/Activation.cpp b/src/backends/reference/workloads/Activation.cpp
index 82dd919de9..798c6e48d5 100644
--- a/src/backends/reference/workloads/Activation.cpp
+++ b/src/backends/reference/workloads/Activation.cpp
@@ -9,6 +9,7 @@
namespace armnn
{
+
float Activation(float in,
ActivationFunction function,
float a,
@@ -74,6 +75,13 @@ float Activation(float in,
output = (in >= 0) ? in : a * (expf(in) - 1);
break;
}
+ case ActivationFunction::HardSwish:
+ {
+ // hard_swish(x) = x * relu6(x+3) / 6
+ // relu6(x) = min(max(x,0),6)
+ output = in * (std::min(std::max((in + 3),0.0f),6.0f)) / 6;
+ break;
+ }
default:
{
throw InvalidArgumentException("Unsupported activation function");
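The single expression added above is the same computation as the step-by-step breakdown in the test implementation: (in + 3) is clamped to [0, 6] and then, divided by 6, multiplies in by a gating factor in [0, 1]. In the reference backend this scalar Activation() is applied element by element across the tensor; a hedged sketch of that pattern (the raw-pointer loop is illustrative, the real workload iterates via its own decoder/encoder types):

#include <cstddef>
#include "Activation.hpp"

// Apply the scalar reference Activation() across a buffer, element-wise.
void ApplyActivationElementwise(const float* in, float* out, size_t count,
                                armnn::ActivationFunction function, float a, float b)
{
    for (size_t i = 0; i < count; ++i)
    {
        out[i] = armnn::Activation(in[i], function, a, b);
    }
}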