author     Isabella Gottardi <isabella.gottardi@arm.com>   2018-02-12 14:59:19 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:49:16 +0000
commit     3f217ec4ff11e20fe686beb9a28d0bbd80a56cd6 (patch)
tree       81db8baab925af5b416b66d0328be2eb49543824
parent     d9eb27597eabe5b7c17520f4f9b3f8a282d72573 (diff)
download   ComputeLibrary-3f217ec4ff11e20fe686beb9a28d0bbd80a56cd6.tar.gz
COMPMID-908 - Merge Activation layer with Convolution Layer (NEON, CL, GLES)
Change-Id: Iab06d0768ecf805b841e601185608aae88cf9166
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120874
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
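What this patch does: each convolution function gains an optional trailing ActivationLayerInfo parameter, so the activation previously run as a separate layer can be fused into (or chained directly after) the convolution. As a minimal illustrative sketch (not part of the patch; tensor creation, shape setup and allocation are assumed to happen elsewhere, and all names here are illustrative), a CL caller would now write:

    // Fuse a bounded ReLU, min(max(x, 0), 6), into the convolution.
    // PadStrideInfo(stride_x, stride_y, pad_x, pad_y).
    arm_compute::CLConvolutionLayer conv;
    const arm_compute::PadStrideInfo conv_info(1, 1, 1, 1);
    const arm_compute::ActivationLayerInfo act(
        arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);
    conv.configure(src, weights, biases, dst, conv_info,
                   arm_compute::WeightsInfo(), arm_compute::Size2D(1U, 1U), act);
    conv.run(); // convolution and activation now execute as one function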
-rw-r--r--  arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvolutionLayer.h | 13
-rw-r--r--  arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h | 13
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h | 9
-rw-r--r--  arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h | 11
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h | 8
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h | 5
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvolutionLayer.h | 9
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h | 12
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h | 10
-rw-r--r--  arm_compute/runtime/NEON/functions/NEWinogradLayer.h | 6
-rw-r--r--  src/core/GLES_COMPUTE/GCKernelLibrary.cpp | 8
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs | 92
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h | 119
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs | 49
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs | 55
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs | 14
-rw-r--r--  src/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.cpp | 14
-rw-r--r--  src/runtime/CL/functions/CLConvolutionLayer.cpp | 17
-rw-r--r--  src/runtime/CL/functions/CLDirectConvolutionLayer.cpp | 28
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 36
-rw-r--r--  src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp | 30
-rw-r--r--  src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp | 22
-rw-r--r--  src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp | 11
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp | 24
-rw-r--r--  src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp | 27
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp | 44
-rw-r--r--  src/runtime/NEON/functions/NEWinogradLayer.cpp | 21
-rw-r--r--  tests/benchmark/CL/ConvolutionLayer.cpp | 36
-rw-r--r--  tests/benchmark/CL/DirectConvolutionLayer.cpp | 51
-rw-r--r--  tests/benchmark/GLES_COMPUTE/ConvolutionLayer.cpp | 33
-rw-r--r--  tests/benchmark/GLES_COMPUTE/DirectConvolutionLayer.cpp | 52
-rw-r--r--  tests/benchmark/NEON/ConvolutionLayer.cpp | 84
-rw-r--r--  tests/benchmark/NEON/DirectConvolutionLayer.cpp | 62
-rw-r--r--  tests/benchmark/fixtures/ConvolutionLayerFixture.h | 5
-rw-r--r--  tests/benchmark/fixtures/DirectConvolutionLayerFixture.h | 5
-rw-r--r--  tests/benchmark/fixtures/WinogradLayerFixture.h | 5
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp | 87
-rw-r--r--  tests/validation/CL/DilatedConvolutionLayer.cpp | 94
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp | 56
-rw-r--r--  tests/validation/CL/Winograd.cpp | 9
-rw-r--r--  tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp | 28
-rw-r--r--  tests/validation/GLES_COMPUTE/DirectConvolutionLayer.cpp | 15
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp | 81
-rw-r--r--  tests/validation/NEON/DilatedConvolutionLayer.cpp | 80
-rw-r--r--  tests/validation/NEON/DirectConvolutionLayer.cpp | 42
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h | 32
-rw-r--r--  tests/validation/fixtures/DirectConvolutionLayerFixture.h | 48
-rw-r--r--  tests/validation/fixtures/WinogradLayerFixture.h | 16
-rw-r--r--  utils/TypePrinter.h | 5
50 files changed, 1135 insertions(+), 502 deletions(-)
diff --git a/arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h b/arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h
index f1c9c1933d..630253f64f 100644
--- a/arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h
@@ -58,8 +58,10 @@ public:
* @param[out] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
* while every dimension above represents a batch. Data types supported: Same as @p input
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *bias, IGCTensor *output, const PadStrideInfo &conv_info);
+ void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *bias, IGCTensor *output,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
BorderSize border_size() const override;
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index 5f383b28ab..7df765988f 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -24,10 +24,9 @@
#ifndef __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__
-#include "arm_compute/runtime/IFunction.h"
-
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
+#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include <memory>
@@ -57,9 +56,10 @@ public:
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
* @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
- const Size2D &dilation = Size2D(1U, 1U));
+ const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayer
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -72,11 +72,12 @@ public:
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
* @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U));
+ const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will return the convolution called by @ref CLConvolutionLayer
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -88,14 +89,14 @@ public:
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
* @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. Data type supported: Same as @p input.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
* @param[in] gpu_target Specifies the @p GPUTarget.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
*
* @return a status
*/
static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const GPUTarget gpu_target, const Size2D &dilation = Size2D(1U, 1U));
-
+ const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation = Size2D(1U, 1U));
// Inherited methods overridden:
void run() override;
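Note that in get_convolution_method the new act_info argument is inserted before gpu_target rather than appended, so existing call sites must be updated. A hedged sketch of the updated query (tensor infos and conv_info/act_info assumed populated; as the CLConvolutionLayer.cpp hunk further down shows, the current implementation unconditionally returns ConvolutionMethod::GEMM):

    const arm_compute::ConvolutionMethod method =
        arm_compute::CLConvolutionLayer::get_convolution_method(
            src->info(), weights->info(), biases->info(), dst->info(),
            conv_info, arm_compute::WeightsInfo(), act_info,
            arm_compute::CLScheduler::get().target());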
diff --git a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
index 205c90c478..4363224a08 100644
--- a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,7 @@
#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/IFunction.h"
#include <memory>
@@ -53,8 +54,9 @@ public:
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
+ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayer
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -65,10 +67,12 @@ public:
* @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -76,6 +80,9 @@ public:
private:
CLDirectConvolutionLayerKernel _direct_conv_kernel;
CLFillBorderKernel _input_border_handler;
+ CLActivationLayer _activationlayer_function;
+
+ bool _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYER_H__ */
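The direct convolution function keeps the activation as a trailing CLActivationLayer run in-place on the output. A hedged validate-then-configure sketch (variable names illustrative, tensors assumed initialized elsewhere):

    const arm_compute::ActivationLayerInfo act(
        arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
    const arm_compute::Status status = arm_compute::CLDirectConvolutionLayer::validate(
        src->info(), weights->info(), biases->info(), dst->info(), conv_info, act);
    if(bool(status))
    {
        arm_compute::CLDirectConvolutionLayer direct_conv;
        direct_conv.configure(src, weights, biases, dst, conv_info, act);
        direct_conv.run(); // enqueues the kernel, then the activation if enabled
    }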
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 91d35ca0b2..9bfbb536ef 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -36,6 +36,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
@@ -125,9 +126,10 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
- const Size2D &dilation = Size2D(1U, 1U));
+ const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer.
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -142,11 +144,12 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U));
+ const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -179,6 +182,7 @@ private:
CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
CLCol2ImKernel _col2im_kernel;
+ CLActivationLayer _activationlayer_function;
const ICLTensor *_original_weights;
@@ -189,6 +193,7 @@ private:
bool _is_quantized;
bool _is_first_run;
+ bool _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h
index 14de169236..2cf1f77fb4 100644
--- a/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h
@@ -27,6 +27,7 @@
#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
#include "arm_compute/runtime/IFunction.h"
@@ -60,8 +61,10 @@ public:
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
+ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLWinogradConvolutionLayer
*
* @note: This function only works with 3x3 kernels and unit strides
@@ -74,10 +77,12 @@ public:
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -88,10 +93,12 @@ private:
CLWinogradInputTransform _input_transform;
CLWinogradFilterTransformKernel _filter_transform;
CLWinogradOutputTransformKernel _output_transform;
+ CLActivationLayer _activationlayer_function;
CLTensor _input0;
CLTensor _input1;
CLTensor _batched_mm_output;
bool _is_first_run;
+ bool _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_CLWINOGRADCONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
index 198fa7ba94..54b17b40bb 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
@@ -35,6 +35,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryGroup.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCActivationLayer.h"
#include "arm_compute/runtime/IFunction.h"
#include <memory>
@@ -100,9 +101,10 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with GCWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with GCGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
- const Size2D &dilation = Size2D(1U, 1U));
+ void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info,
+ const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -125,6 +127,7 @@ private:
GCGEMMMatrixMultiplyKernel _mm_kernel;
GCCol2ImKernel _output_col2im_kernel;
GCFillBorderKernel _fill_border;
+ GCActivationLayer _activationlayer_function;
GCTensor _input_im2col_reshaped;
GCTensor _input_interleaved_reshaped;
@@ -136,6 +139,7 @@ private:
bool _append_bias;
bool _is_fully_connected_convolution;
bool _are_weights_reshaped;
+ bool _is_activationlayer_enabled;
};
}
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h
index 976aee7b83..9cfdec0f7e 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h
@@ -29,6 +29,7 @@
#include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCTensorShiftKernel.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCActivationLayer.h"
#include "arm_compute/runtime/IFunction.h"
#include <memory>
@@ -62,8 +63,10 @@ public:
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info);
+ void configure(IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override final;
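On GLES the fused path is narrower: as the GCDirectConvolutionLayerKernel.cpp hunk further down asserts, only RELU and LOGISTIC are accepted. A hedged sketch (names illustrative, tensors assumed initialized):

    // Only RELU and LOGISTIC can be fused into the GC direct convolution.
    arm_compute::GCDirectConvolutionLayer conv;
    conv.configure(src, weights, biases, dst, conv_info,
                   arm_compute::ActivationLayerInfo(
                       arm_compute::ActivationLayerInfo::ActivationFunction::RELU));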
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index 3e6e5abd28..c67951a7ee 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -62,9 +62,10 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*/
void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
- const Size2D &dilation = Size2D(1U, 1U));
+ const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEConvolutionLayer
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -79,11 +80,12 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U));
+ const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will return the convolution called by @ref NEConvolutionLayer
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -98,11 +100,12 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return the Convolution Method Hint
*/
static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U));
+ const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index e1aa839802..1eaad5cda6 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,6 +31,7 @@
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include <memory>
@@ -66,8 +67,9 @@ public:
* @param[out] output Output tensor.
* The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info);
+ void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayer
*
* @note: DirectConvolution only works in the following configurations:
@@ -84,10 +86,12 @@ public:
* @param[in] output Output tensor.
* The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -97,9 +101,11 @@ private:
NEDirectConvolutionLayerOutputStageKernel _output_stage_kernel;
NEDirectConvolutionLayerKernel _conv_kernel;
NEFillBorderKernel _input_border_handler;
+ NEActivationLayer _activationlayer_function;
Tensor _accumulator;
bool _has_bias;
bool _is_fixed_point;
+ bool _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index e733fec4b6..24e23f133a 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -37,6 +37,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/AssemblyHelper.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
@@ -95,6 +96,7 @@ private:
* -# @ref NEGEMMMatrixMultiplyKernel or @ref NEGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
* -# @ref NEGEMMLowpQuantizeDownInt32ToUint8Scale (if quantized asymmetric)
* -# @ref NECol2ImKernel
+ * -# @ref NEActivationLayer (executed only if the activation layer is enabled)
*/
class NEGEMMConvolutionLayer : public IFunction
{
@@ -123,9 +125,10 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*/
void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
- const Size2D &dilation = Size2D(1U, 1U));
+ const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -140,11 +143,12 @@ public:
* @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
* tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
* @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U));
+ const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -171,6 +175,7 @@ private:
NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
NECol2ImKernel _output_col2im_kernel;
+ NEActivationLayer _activationlayer_function;
const ITensor *_original_weights;
@@ -186,6 +191,7 @@ private:
bool _are_weights_reshaped;
bool _is_quantized;
bool _is_interleaved;
+ bool _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_NECONVOLUTIONGEMMLAYER_H__ */
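As documented above, the NEON GEMM-based path fuses only RELU, BOUNDED_RELU and LU_BOUNDED_RELU. A hedged sketch using the lower/upper-bounded variant, where a is the upper and b the lower bound (names illustrative, tensors assumed initialized):

    const arm_compute::ActivationLayerInfo act(
        arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 0.f);
    arm_compute::NEGEMMConvolutionLayer conv;
    conv.configure(src, weights, biases, dst, conv_info,
                   arm_compute::WeightsInfo(), arm_compute::Size2D(1U, 1U), act);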
diff --git a/arm_compute/runtime/NEON/functions/NEWinogradLayer.h b/arm_compute/runtime/NEON/functions/NEWinogradLayer.h
index a939f82854..61a4caae3a 100644
--- a/arm_compute/runtime/NEON/functions/NEWinogradLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEWinogradLayer.h
@@ -30,6 +30,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include <memory>
@@ -61,8 +62,9 @@ public:
* @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info);
+ void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
void run() override;
@@ -94,6 +96,7 @@ private:
std::unique_ptr<INEKernel> _transform_input_kernel;
std::unique_ptr<INEKernel> _transform_output_kernel;
std::unique_ptr<INEKernel> _transform_weights_kernel;
+ NEActivationLayer _activationlayer_function;
CPPPermute _permute_input;
CPPPermute _permute_weights;
@@ -108,6 +111,7 @@ private:
const ITensor *_weights;
ITensor *_output;
bool _reshaped_kernel;
+ bool _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_NEWINOGRADLAYER_H__ */
diff --git a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
index 5d1464ace4..25ac02e8f4 100644
--- a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
+++ b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
@@ -232,6 +232,14 @@ const std::map<std::string, std::string> GCKernelLibrary::_program_source_map =
{
#ifdef EMBEDDED_KERNELS
{
+ "helpers_cs.h",
+#include "./cs_shaders/helpers_cs.hembed"
+ },
+ {
+ "activation_layer_helpers_cs.h",
+#include "./cs_shaders/activation_layer_helpers_cs.hembed"
+ },
+ {
"absdiff.cs",
#include "./cs_shaders/absdiff.csembed"
},
diff --git a/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs
index 7d3f4ee67e..9a1e233624 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,97 +23,9 @@
*/
layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
+#include "activation_layer_helpers_cs.h"
#include "helpers_cs.h"
-#ifdef DATA_TYPE_FP32
-precision highp float;
-#elif defined(DATA_TYPE_FP16)
-#if defined(LOGISTIC) || defined(TANH) || defined(SRELU) || defined(SQRT)
-precision highp float;
-#else /*LOGISTIC_TANH_SRELU_SQRT*/
-precision mediump float;
-#endif /*LOGISTIC_TANH_SRELU_SQRT*/
-#endif /*DATA_TYPE_FP32*/
-
-#define ABS_OP(a) abs((a))
-#define ADD_OP(a, b) ((a) + (b))
-#define SUB_OP(a, b) ((a) - (b))
-#define MUL_OP(a, b) ((a) * (b))
-#define MLA_OP(a, b, c) ((b) * (c) + (a))
-#define DIV_OP(a, b) ((a) / (b))
-#define EXP_OP(a) exp((a))
-#define LOG_OP(a) log((a))
-#define SQRT_OP(a) sqrt((a))
-#define CONST_ONE (1.f)
-
-// Logistic Activation
-float logistic_op(float x)
-{
- return DIV_OP(CONST_ONE, ADD_OP(CONST_ONE, EXP_OP(-x)));
-}
-// Hyperbolic Tangent Activation
-float tanh_op(float x)
-{
- float tmp = float(B_VAL) * x;
- if(tmp > 10.f)
- {
- return MUL_OP(float(A_VAL), 1.f);
- }
- else if(tmp < -10.f)
- {
- return MUL_OP(float(A_VAL), -1.f);
- }
- else
- {
- return MUL_OP(float(A_VAL), tanh(tmp + 0.000001f));
- }
-}
-// RELU Tangent Activation
-float relu_op(float x)
-{
- return max(0.f, x);
-}
-// Bounded RELU Activation
-float brelu_op(float x)
-{
- return min(float(A_VAL), max(float(0.0), x));
-}
-// Lower Upper Bounded RELU Activation
-float lu_brelu_op(float x)
-{
- return min(max(x, float(B_VAL)), float(A_VAL));
-}
-// Leaky RELU Activation
-float lrelu_op(float x)
-{
- return (x > float(0.0)) ? x : MUL_OP(float(A_VAL), x);
-}
-// Soft RELU Activation
-float srelu_op(float x)
-{
- return LOG_OP(ADD_OP(CONST_ONE, EXP_OP(x)));
-}
-// Absolute Activation
-float abs_op(float x)
-{
- return ABS_OP(x);
-}
-// Square Activation
-float square_op(float x)
-{
- return MUL_OP(x, x);
-}
-// Square-root Activation
-float sqrt_op(float x)
-{
- return SQRT_OP(x);
-}
-// Linear Activation
-float linear_op(float x)
-{
- return MLA_OP(float(B_VAL), float(A_VAL), x);
-}
-
/** This performs an activation function on floating point inputs.
*
* @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
diff --git a/src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h b/src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h
new file mode 100644
index 0000000000..f43a33fe87
--- /dev/null
+++ b/src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef DATA_TYPE_FP32
+precision highp float;
+#elif defined(DATA_TYPE_FP16)
+#if defined(LOGISTIC) || defined(TANH) || defined(SRELU) || defined(SQRT)
+precision highp float;
+#else /*LOGISTIC_TANH_SRELU_SQRT*/
+precision mediump float;
+#endif /*LOGISTIC_TANH_SRELU_SQRT*/
+#endif /*DATA_TYPE_FP32*/
+
+#define ABS_OP(a) abs((a))
+#define ADD_OP(a, b) ((a) + (b))
+#define SUB_OP(a, b) ((a) - (b))
+#define MUL_OP(a, b) ((a) * (b))
+#define MLA_OP(a, b, c) ((b) * (c) + (a))
+#define DIV_OP(a, b) ((a) / (b))
+#define EXP_OP(a) exp((a))
+#define LOG_OP(a) log((a))
+#define SQRT_OP(a) sqrt((a))
+#define CONST_ONE (1.f)
+
+// Logistic Activation
+float logistic_op(float x)
+{
+ return DIV_OP(CONST_ONE, ADD_OP(CONST_ONE, EXP_OP(-x)));
+}
+vec4 logistic_op(vec4 x)
+{
+ return DIV_OP(vec4(CONST_ONE), ADD_OP(CONST_ONE, EXP_OP(-x)));
+}
+// Hyperbolic Tangent Activation
+float tanh_op(float x)
+{
+ float tmp = float(B_VAL) * x;
+ if(tmp > 10.f)
+ {
+ return MUL_OP(float(A_VAL), 1.f);
+ }
+ else if(tmp < -10.f)
+ {
+ return MUL_OP(float(A_VAL), -1.f);
+ }
+ else
+ {
+ return MUL_OP(float(A_VAL), tanh(tmp + 0.000001f));
+ }
+}
+// RELU Activation
+float relu_op(float x)
+{
+ return max(0.f, x);
+}
+vec4 relu_op(vec4 x)
+{
+ return max(vec4(0.f), x);
+}
+// Bounded RELU Activation
+float brelu_op(float x)
+{
+ return min(float(A_VAL), max(float(0.0), x));
+}
+// Lower Upper Bounded RELU Activation
+float lu_brelu_op(float x)
+{
+ return min(max(x, float(B_VAL)), float(A_VAL));
+}
+// Leaky RELU Activation
+float lrelu_op(float x)
+{
+ return (x > float(0.0)) ? x : MUL_OP(float(A_VAL), x);
+}
+// Soft RELU Activation
+float srelu_op(float x)
+{
+ return LOG_OP(ADD_OP(CONST_ONE, EXP_OP(x)));
+}
+// Absolute Activation
+float abs_op(float x)
+{
+ return ABS_OP(x);
+}
+// Square Activation
+float square_op(float x)
+{
+ return MUL_OP(x, x);
+}
+// Square-root Activation
+float sqrt_op(float x)
+{
+ return SQRT_OP(x);
+}
+// Linear Activation
+float linear_op(float x)
+{
+ return MLA_OP(float(B_VAL), float(A_VAL), x);
+}
diff --git a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs
index ea4e9c18e2..b42c09bbc7 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,10 @@ layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z =
#include "helpers_cs.h"
+#ifdef FUSED_ACTIVATION
+#include "activation_layer_helpers_cs.h"
+#endif /* FUSED_ACTIVATION */
+
#if defined(DATA_TYPE_FP16)
precision mediump float;
#endif // DATA_TYPE_FP16
@@ -99,6 +103,10 @@ void main()
pixels += LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels = ACT_OP(pixels);
+#endif /* FUSED_ACTIVATION */
+
STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
}
@@ -210,6 +218,10 @@ void main()
pixels += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels = ACT_OP(pixels);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
}
#elif defined(PROCESS_4X_2Y_1Z)
@@ -333,6 +345,11 @@ void main()
pixels[1] += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
}
@@ -470,6 +487,12 @@ void main()
pixels[2] += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels[2] = ACT_OP(pixels[2]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
@@ -609,6 +632,13 @@ void main()
pixels1[1] += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels1[0] = ACT_OP(pixels1[0]);
+ pixels1[1] = ACT_OP(pixels1[1]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels1[0]);
@@ -745,6 +775,11 @@ void main()
pixels[1] += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
@@ -868,6 +903,11 @@ void main()
pixels[1] += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
}
#elif defined(PROCESS_8X_2Y_1Z)
@@ -1001,6 +1041,13 @@ void main()
pixels1[1] += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels1[0] = ACT_OP(pixels1[0]);
+ pixels1[1] = ACT_OP(pixels1[1]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
STORE_PACK8_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels1);
}
diff --git a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs
index 855d450335..e51cc3785a 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,10 @@ layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z =
#include "helpers_cs.h"
+#ifdef FUSED_ACTIVATION
+#include "activation_layer_helpers_cs.h"
+#endif /* FUSED_ACTIVATION */
+
#if defined(DATA_TYPE_FP16)
precision mediump float;
#endif // DATA_TYPE_FP16
@@ -114,6 +118,10 @@ void main()
pixels += LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels = ACT_OP(pixels);
+#endif /* FUSED_ACTIVATION */
+
STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
}
@@ -238,6 +246,11 @@ void main()
pixels[1] += vec4(b);
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+#endif /* FUSED_ACTIVATION */
+
VSTORE2_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
}
@@ -335,6 +348,10 @@ void main()
pixels += b;
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels = ACT_OP(pixels);
+#endif /* FUSED_ACTIVATION */
+
STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
}
@@ -434,6 +451,12 @@ void main()
pixels[2] += vec4(b);
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels[2] = ACT_OP(pixels[2]);
+#endif /* FUSED_ACTIVATION */
+
STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels[0]);
STORE(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
@@ -601,6 +624,12 @@ void main()
}
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels[2] = ACT_OP(pixels[2]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK8_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE_PACK8_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
@@ -728,6 +757,10 @@ void main()
pixels += vec4(b);
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels = ACT_OP(pixels);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
}
@@ -841,6 +874,12 @@ void main()
}
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels[2] = ACT_OP(pixels[2]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
@@ -962,6 +1001,13 @@ void main()
}
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels[2] = ACT_OP(pixels[2]);
+ pixels[3] = ACT_OP(pixels[3]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
@@ -1087,6 +1133,13 @@ void main()
}
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels[0] = ACT_OP(pixels[0]);
+ pixels[1] = ACT_OP(pixels[1]);
+ pixels[2] = ACT_OP(pixels[2]);
+ pixels[3] = ACT_OP(pixels[3]);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
diff --git a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs
index c919e4ed80..728e9644b2 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,10 @@ layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z =
#include "helpers_cs.h"
+#ifdef FUSED_ACTIVATION
+#include "activation_layer_helpers_cs.h"
+#endif /* FUSED_ACTIVATION */
+
#if defined(DATA_TYPE_FP16)
precision mediump float;
#endif // DATA_TYPE_FP16
@@ -116,6 +120,10 @@ void main()
pixels += LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ pixels = ACT_OP(pixels);
+#endif /* FUSED_ACTIVATION */
+
STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
}
#elif defined(DATA_TYPE_FP16)
@@ -204,6 +212,10 @@ void main()
res += vec4(b);
#endif /* BIAS */
+#ifdef FUSED_ACTIVATION
+ res = ACT_OP(res);
+#endif /* FUSED_ACTIVATION */
+
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, res);
}
diff --git a/src/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.cpp
index bef30d5042..67a1530431 100644
--- a/src/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.cpp
@@ -50,7 +50,8 @@ BorderSize GCDirectConvolutionLayerKernel<kernel_size>::border_size(
}
template <unsigned int kernel_size>
-void GCDirectConvolutionLayerKernel<kernel_size>::configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *bias, IGCTensor *output, const PadStrideInfo &conv_info)
+void GCDirectConvolutionLayerKernel<kernel_size>::configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *bias, IGCTensor *output,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON(weights->info()->dimension(2) != input->info()->dimension(2));
@@ -58,6 +59,7 @@ void GCDirectConvolutionLayerKernel<kernel_size>::configure(const IGCTensor *inp
ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
ARM_COMPUTE_ERROR_ON_MSG((kernel_size == 3 && std::get<0>(conv_info.stride()) > 2), "Strides larger than 2 not supported in 3x3 direct convolution!");
ARM_COMPUTE_ERROR_ON(kernel_size != weights->info()->dimension(0));
+ ARM_COMPUTE_ERROR_ON(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU && act_info.activation() != ActivationLayerInfo::ActivationFunction::LOGISTIC);
if(bias != nullptr)
{
@@ -108,6 +110,16 @@ void GCDirectConvolutionLayerKernel<kernel_size>::configure(const IGCTensor *inp
std::string dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
options.emplace(("#define " + dt_name));
+ // Activation information in case of a fused activation
+ if(act_info.enabled())
+ {
+ options.emplace("#define FUSED_ACTIVATION");
+ options.emplace(("#define " + string_from_activation_func(act_info.activation())));
+ options.emplace(("#define ACT_OP " + lower_string(string_from_activation_func(act_info.activation())) + "_op"));
+ options.emplace(("#define A_VAL " + float_to_string_with_full_precision(act_info.a())));
+ options.emplace(("#define B_VAL " + float_to_string_with_full_precision(act_info.b())));
+ }
+
unsigned int num_elems_read_per_iteration_x = kernel_size * _conv_stride_x;
unsigned int num_elems_read_per_iteration_y = 1;
unsigned int num_elems_written_per_iteration_x = 1;
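To make the fusion concrete: for a hypothetical act_info = ActivationLayerInfo(ActivationFunction::RELU), the options emplaced above would expand to shader build options along these lines (exact float formatting depends on float_to_string_with_full_precision):

    #define FUSED_ACTIVATION
    #define RELU
    #define ACT_OP relu_op
    #define A_VAL 0.000000000e+00
    #define B_VAL 0.000000000e+00

The shader then pulls relu_op from activation_layer_helpers_cs.h and applies it through the ACT_OP hook added to the direct_convolution*.cs shaders above.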
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 64bda93ff0..bcb5424aab 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -43,13 +43,13 @@ CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_ma
}
void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- const Size2D &dilation)
+ const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation));
+ ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info));
switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info,
- weights_info, CLScheduler::get().target(), dilation))
+ weights_info, act_info, CLScheduler::get().target(), dilation))
{
case ConvolutionMethod::DIRECT:
{
@@ -72,25 +72,25 @@ void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, c
}
Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const Size2D &dilation)
+ const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
//Configure if the parameters match the direct convolution or the gemm-based
const GPUTarget gpu_target = CLScheduler::get().target();
- switch(CLConvolutionLayer::get_convolution_method(input, weights, biases, output, conv_info, weights_info, gpu_target, dilation))
+ switch(CLConvolutionLayer::get_convolution_method(input, weights, biases, output, conv_info, weights_info, act_info, gpu_target, dilation))
{
case ConvolutionMethod::DIRECT:
{
// Validate direct convolution layer
- CLDirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
+ CLDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info);
break;
}
case ConvolutionMethod::GEMM:
{
// Validate gemm-based convolution layer
- CLGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation);
+ CLGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info);
break;
}
default:
@@ -102,7 +102,7 @@ Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
}
ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const GPUTarget gpu_target, const Size2D &dilation)
+ const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation)
{
ARM_COMPUTE_UNUSED(input);
ARM_COMPUTE_UNUSED(weights);
@@ -112,6 +112,7 @@ ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *
ARM_COMPUTE_UNUSED(weights_info);
ARM_COMPUTE_UNUSED(gpu_target);
ARM_COMPUTE_UNUSED(dilation);
+ ARM_COMPUTE_UNUSED(act_info);
return ConvolutionMethod::GEMM;
}
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index c48865a0cc..c451bd4b4c 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -33,11 +33,11 @@
using namespace arm_compute;
CLDirectConvolutionLayer::CLDirectConvolutionLayer()
- : _direct_conv_kernel(), _input_border_handler()
+ : _direct_conv_kernel(), _input_border_handler(), _activationlayer_function(), _is_activationlayer_enabled(false)
{
}
-void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
// Set GPU target
_direct_conv_kernel.set_target(CLScheduler::get().target());
@@ -55,11 +55,25 @@ void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weig
// Tune kernels
CLScheduler::get().tune_kernel_static(_direct_conv_kernel);
+
+ _is_activationlayer_enabled = act_info.enabled();
+
+ //Configure Activation Layer
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
}
-Status CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
+Status CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
{
- return CLDirectConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, CLScheduler::get().target());
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, CLScheduler::get().target()));
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
+ }
+ return Status{};
}
void CLDirectConvolutionLayer::run()
@@ -69,4 +83,10 @@ void CLDirectConvolutionLayer::run()
// Run direct convolution
CLScheduler::get().enqueue(_direct_conv_kernel);
+
+ // Run activation layer
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
}
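Editor's note: the CLDirectConvolutionLayer hunks above establish the pattern repeated in every backend below. act_info defaults to a disabled ActivationLayerInfo, so existing callers compile unchanged; when it is enabled, the function configures a CLActivationLayer in place on the convolution output (the nullptr destination selects in-place execution) and runs it straight after the convolution kernel. A minimal caller-side sketch of the fused path, with illustrative shapes and a bounded ReLU; it is not part of the patch:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative shapes: 8x8x3 input, sixteen 3x3 filters, stride 1, no padding.
    CLTensor src{}, weights{}, biases{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(6U, 6U, 16U), 1, DataType::F32));

    CLDirectConvolutionLayer conv{};
    // An enabled ActivationLayerInfo fuses the activation into this function;
    // a default-constructed one (enabled() == false) keeps the old behaviour.
    conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0),
                   ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    conv.run();
    CLScheduler::get().sync();
    return 0;
}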
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index f43e100565..084c4df718 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -90,8 +90,8 @@ void CLConvolutionLayerReshapeWeights::run()
}
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _original_weights(nullptr),
- _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_first_run(true)
+ : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
+ _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_first_run(true), _is_activationlayer_enabled(false)
{
}
@@ -152,7 +152,7 @@ Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
}
void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- const Size2D &dilation)
+ const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
@@ -162,7 +162,8 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
output->info(),
conv_info,
weights_info,
- dilation));
+ dilation,
+ act_info));
_is_first_run = true;
_original_weights = weights;
@@ -260,11 +261,19 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
// Allocate intermediate tensor
_weights_reshaped.allocator()->allocate();
+ // Configure activation layer
+ _is_activationlayer_enabled = act_info.enabled();
+
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
+
ARM_COMPUTE_UNUSED(weights_info);
}
Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const Size2D &dilation)
+ const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
@@ -274,6 +283,11 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != input->dimension(2));
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
+ }
+
const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
const bool append_bias = (biases != nullptr) && (!is_quantized);
const unsigned bias_element = (append_bias) ? 1 : 0;
@@ -343,6 +357,12 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
}
+ // Validate activation layer
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
+ }
+
return Status{};
}
@@ -383,5 +403,11 @@ void CLGEMMConvolutionLayer::run()
// Reshape output matrix
CLScheduler::get().enqueue(_col2im_kernel, false);
+ // Run activation layer if enabled
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
+
_memory_group.release();
}
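Editor's note: as elsewhere in the library, the static validate() mirrors configure() argument for argument, so support for a fused GEMM convolution can be queried before any tensor is allocated. A sketch of that validate-first pattern, assuming illustrative shapes; the function name is hypothetical, and Status carries the failure description when the combination is rejected:

#include <iostream>

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;

bool fused_gemm_conv_supported()
{
    // validate() may query the GPU target, so initialise the scheduler first.
    CLScheduler::get().default_init();

    // Illustrative shapes; pad 1 keeps the 8x8 spatial size.
    const TensorInfo src(TensorShape(8U, 8U, 3U), 1, DataType::F32);
    const TensorInfo wei(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32);
    const TensorInfo bia(TensorShape(16U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(8U, 8U, 16U), 1, DataType::F32);

    const Status s = CLGEMMConvolutionLayer::validate(&src, &wei, &bia, &dst,
                                                      PadStrideInfo(1, 1, 1, 1), WeightsInfo(), Size2D(1U, 1U),
                                                      ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    if(!bool(s))
    {
        std::cerr << s.error_description() << std::endl;
    }
    return bool(s);
}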
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index a861e0072e..7af36bf06b 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -32,11 +32,12 @@
using namespace arm_compute;
CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(), _output_transform(), _input0(), _input1(), _batched_mm_output(), _is_first_run(true)
+ : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(), _output_transform(), _activationlayer_function(), _input0(), _input1(), _batched_mm_output(),
+ _is_first_run(true), _is_activationlayer_enabled(false)
{
}
-void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
// TODO(COMPMID-1013): This part will be removed
// Get indices for the width and height
@@ -73,13 +74,21 @@ void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *we
_output_transform.configure(&_batched_mm_output, biases, output, Size2D(kernel_w, kernel_h), Size2D(output_convolved_shape[idx_width], output_convolved_shape[idx_height]), Size2D(num_tiles_x,
num_tiles_y));
+ // Configure activation layer
+ _is_activationlayer_enabled = act_info.enabled();
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
+
// Allocate temporary tensors
_input0.allocator()->allocate();
_input1.allocator()->allocate();
_batched_mm_output.allocator()->allocate();
}
-Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
+Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
{
// TODO(COMPMID-1013): This part will be removed
// Get indices for the width and height
@@ -107,17 +116,23 @@ Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, Size2D(2U, 2U)));
- // Configure batched matrix multiply
+ // Validate batched matrix multiply
TensorShape batched_mm_output_shape = input0.tensor_shape();
batched_mm_output_shape[0] = input1.tensor_shape()[0];
const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/)));
- // Configure output transform
+ // Validate output transform
ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradOutputTransformKernel::validate(&batched_mm_output, biases, output, Size2D(kernel_w, kernel_h), Size2D(output_convolved_shape[idx_width],
output_convolved_shape[idx_height]),
Size2D(num_tiles_x, num_tiles_y)));
+ // Validate activation layer
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
+ }
+
return Status{};
}
@@ -142,5 +157,10 @@ void CLWinogradConvolutionLayer::run()
// Run output transform
CLScheduler::get().enqueue(_output_transform);
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
+
_memory_group.release();
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
index c2b7e02284..b1c8665216 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
@@ -92,8 +92,9 @@ void GCConvolutionLayerReshapeWeights::run()
}
GCConvolutionLayer::GCConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _fill_border(), _input_im2col_reshaped(),
- _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _append_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
+ : _memory_group(std::move(memory_manager)), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _fill_border(), _activationlayer_function(),
+ _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _append_bias(false), _is_fully_connected_convolution(false),
+ _are_weights_reshaped(false), _is_activationlayer_enabled(false)
{
}
@@ -103,7 +104,7 @@ void GCConvolutionLayer::configure_mm(const IGCTensor *input, const IGCTensor *w
}
void GCConvolutionLayer::configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- const Size2D &dilation)
+ const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
@@ -256,6 +257,14 @@ void GCConvolutionLayer::configure(const IGCTensor *input, const IGCTensor *weig
{
_weights_reshaped.allocator()->allocate();
}
+
+ // Configure activation layer
+ _is_activationlayer_enabled = act_info.enabled();
+
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
}
void GCConvolutionLayer::run()
@@ -290,4 +299,11 @@ void GCConvolutionLayer::run()
GCScheduler::get().dispatch(_output_col2im_kernel, false);
_memory_group.release();
+
+ GCScheduler::get().memory_barrier();
+ // Run Activation Layer
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp
index a2607d4c2d..c0cf09836f 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp
@@ -39,26 +39,27 @@ GCDirectConvolutionLayer::GCDirectConvolutionLayer()
{
}
-void GCDirectConvolutionLayer::configure(IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info)
+void GCDirectConvolutionLayer::configure(IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
{
int kernel_size = weights->info()->dimension(0);
if(kernel_size == 1)
{
auto k = arm_compute::support::cpp14::make_unique<GCDirectConvolutionLayer1x1Kernel>();
- k->configure(input, weights, biases, output, conv_info);
+ k->configure(input, weights, biases, output, conv_info, act_info);
_kernel = std::move(k);
}
else if(kernel_size == 3)
{
auto k = arm_compute::support::cpp14::make_unique<GCDirectConvolutionLayer3x3Kernel>();
- k->configure(input, weights, biases, output, conv_info);
+ k->configure(input, weights, biases, output, conv_info, act_info);
_kernel = std::move(k);
}
else if(kernel_size == 5)
{
auto k = arm_compute::support::cpp14::make_unique<GCDirectConvolutionLayer5x5Kernel>();
- k->configure(input, weights, biases, output, conv_info);
+ k->configure(input, weights, biases, output, conv_info, act_info);
_kernel = std::move(k);
}
else
@@ -79,4 +80,6 @@ void GCDirectConvolutionLayer::run()
GCScheduler::get().dispatch(_border_handler, false);
GCScheduler::get().memory_barrier();
GCScheduler::get().dispatch(*_kernel);
+ GCScheduler::get().memory_barrier();
+ GCScheduler::get().dispatch(_shift_handler);
}
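Editor's note: two GLES-specific details stand out here. First, the activation is folded into the direct-convolution compute shaders themselves, which is why act_info is forwarded to the kernels instead of to a separate activation function. Second, each dependent dispatch in run() is fenced with GCScheduler::get().memory_barrier(), since one GLES compute dispatch is not guaranteed to observe another's writes without it. The kernel picked at configure time depends only on the filter width; a hedged sketch of that dispatch idiom, reusing the three kernel class names from the hunk above (the free function is hypothetical):

#include <memory>

#include "arm_compute/core/Error.h"
#include "arm_compute/core/GLES_COMPUTE/IGCKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h"
#include "support/ToolchainSupport.h"

using namespace arm_compute;

// Only 1x1, 3x3 and 5x5 filters have specialised GLES kernels;
// anything else is rejected at configure time.
std::unique_ptr<IGCKernel> pick_direct_conv_kernel(unsigned int kernel_size)
{
    switch(kernel_size)
    {
        case 1U:
            return support::cpp14::make_unique<GCDirectConvolutionLayer1x1Kernel>();
        case 3U:
            return support::cpp14::make_unique<GCDirectConvolutionLayer3x3Kernel>();
        case 5U:
            return support::cpp14::make_unique<GCDirectConvolutionLayer5x5Kernel>();
        default:
            ARM_COMPUTE_ERROR("Kernel size not supported");
            return nullptr; // unreachable, keeps the compiler quiet
    }
}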
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index e659495b7c..badeb07405 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -41,33 +41,33 @@ NEConvolutionLayer::NEConvolutionLayer(std::shared_ptr<IMemoryManager> memory_ma
}
void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- const Size2D &dilation)
+ const Size2D &dilation, const ActivationLayerInfo &act_info)
{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation));
+ ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info));
switch(NEConvolutionLayer::get_convolution_method(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info,
- weights_info, dilation))
+ weights_info, dilation, act_info))
{
case ConvolutionMethod::WINOGRAD:
{
auto f = arm_compute::support::cpp14::make_unique<NEWinogradLayer>(_memory_manager);
- f->configure(input, weights, biases, output, conv_info);
+ f->configure(input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
}
case ConvolutionMethod::GEMM:
{
auto f = arm_compute::support::cpp14::make_unique<NEGEMMConvolutionLayer>(_memory_manager);
- f->configure(input, weights, biases, output, conv_info, weights_info, dilation);
+ f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info);
_function = std::move(f);
break;
}
case ConvolutionMethod::DIRECT:
{
auto f = arm_compute::support::cpp14::make_unique<NEDirectConvolutionLayer>(_memory_manager);
- f->configure(input, weights, biases, output, conv_info);
+ f->configure(input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
}
@@ -78,9 +78,9 @@ void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const
}
Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const Size2D &dilation)
+ const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
- switch(NEConvolutionLayer::get_convolution_method(input, weights, biases, output, conv_info, weights_info, dilation))
+ switch(NEConvolutionLayer::get_convolution_method(input, weights, biases, output, conv_info, weights_info, dilation, act_info))
{
case ConvolutionMethod::WINOGRAD:
// Validate Winograd
@@ -88,11 +88,11 @@ Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
break;
case ConvolutionMethod::GEMM:
// Validate GEMM-based convolution
- NEGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation);
+ NEGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info);
break;
case ConvolutionMethod::DIRECT:
// Validate direct convolution
- NEDirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
+ NEDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info);
+ break;
default:
ARM_COMPUTE_ERROR("Not supported.");
break;
@@ -102,10 +102,12 @@ Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
}
ConvolutionMethod NEConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const Size2D &dilation)
+ const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(output);
ARM_COMPUTE_UNUSED(weights_info);
+ ARM_COMPUTE_UNUSED(act_info);
+
if((input->data_type() == DataType::F32) && (weights->dimension(0) == 3) && (weights->dimension(1) == 3) && (weights->num_dimensions() <= 4) && (conv_info.stride().first == 1)
&& (conv_info.stride().second == 1) && (biases != nullptr) && (dilation == Size2D(1U, 1U)))
{
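Editor's note: the NEON selector stays deliberately conservative. act_info is accepted (and marked unused) only so the signature matches the CL overload, and the heuristic distinguishes just Winograd from GEMM. A hedged restatement of the condition visible above, assuming the truncated body returns WINOGRAD as the configure() switch implies; the function name is hypothetical:

#include <utility>

#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"

using namespace arm_compute;

// Winograd only for F32, 3x3, stride-1, undilated convolutions with a bias;
// everything else falls back to GEMM. DIRECT is never returned by this heuristic.
ConvolutionMethod select_neon_conv_method(const ITensorInfo *input, const ITensorInfo *weights,
                                          const ITensorInfo *biases, const PadStrideInfo &conv_info,
                                          const Size2D &dilation)
{
    const bool use_winograd = (input->data_type() == DataType::F32)
                              && (weights->dimension(0) == 3) && (weights->dimension(1) == 3)
                              && (weights->num_dimensions() <= 4)
                              && (conv_info.stride() == std::make_pair(1U, 1U))
                              && (biases != nullptr)
                              && (dilation == Size2D(1U, 1U));
    return use_winograd ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
}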
diff --git a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
index c26c99a0f8..00776d7cf6 100644
--- a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,11 +34,12 @@
using namespace arm_compute;
NEDirectConvolutionLayer::NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _accumulator(), _has_bias(false), _is_fixed_point(false)
+ : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _activationlayer_function(), _accumulator(), _has_bias(false), _is_fixed_point(false),
+ _is_activationlayer_enabled(false)
{
}
-void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info)
+void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
// Free accumulator
if(_accumulator.buffer() != nullptr)
@@ -73,9 +74,17 @@ void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights,
// Add zero padding XY
_input_border_handler.configure(input, _conv_kernel.border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
+
+ // Configure activation layer
+ _is_activationlayer_enabled = act_info.enabled();
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
}
-Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info)
+Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
@@ -101,6 +110,11 @@ Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITenso
// Validate bias kernel
ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, bias, output));
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
+ }
+
return Status{};
}
@@ -115,5 +129,10 @@ void NEDirectConvolutionLayer::run()
{
NEScheduler::get().schedule(&_output_stage_kernel, Window::DimY);
}
+
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
_memory_group.release();
}
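Editor's note: with this change the NEON direct-convolution function mirrors its CL counterpart exactly, so the same caller flow works on both backends. A compact sketch combining the validate-first check with the fused configure/run, assuming the tensors are already initialised and allocated by the caller; the free function is hypothetical:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void neon_fused_direct_conv(Tensor &src, Tensor &weights, Tensor &bias, Tensor &dst)
{
    const ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::RELU);
    const PadStrideInfo       conv_info(1, 1, 0, 0);

    // validate() mirrors configure(), including the new act_info argument.
    ARM_COMPUTE_ERROR_THROW_ON(NEDirectConvolutionLayer::validate(src.info(), weights.info(), bias.info(),
                                                                  dst.info(), conv_info, act));

    NEDirectConvolutionLayer conv{};
    conv.configure(&src, &weights, &bias, &dst, conv_info, act);
    conv.run(); // convolution kernel, output stage, then the fused activation
}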
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index cdbd32373a..c339947633 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -165,10 +165,11 @@ TensorShape get_reshaped_weights_shape_conv(const ITensorInfo *weights, bool app
}
}
-Status validate_and_initialize_values(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const PadStrideInfo &conv_info, const WeightsInfo &weights_info, DataType &dt,
+Status validate_and_initialize_values(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
+ const ActivationLayerInfo &act_info, DataType &dt,
bool &append_bias,
bool &are_weights_reshaped, unsigned int &kernel_width, unsigned int &kernel_height,
- bool &is_fully_connected_convolution, bool &is_interleaved, bool &is_quantized,
+ bool &is_fully_connected_convolution, bool &is_interleaved, bool &is_quantized, bool &is_activationlayer_enabled,
unsigned int &mat_weights_cols, unsigned int &mat_weights_rows,
unsigned int &conv_w, unsigned int &conv_h, const Size2D &dilation)
{
@@ -210,6 +211,7 @@ Status validate_and_initialize_values(const ITensorInfo *input, const ITensorInf
// Check if its a "fully connected" convolution
is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
is_interleaved = (!is_fully_connected_convolution && !is_quantized);
+ is_activationlayer_enabled = act_info.enabled();
return Status{};
}
@@ -217,8 +219,8 @@ Status validate_and_initialize_values(const ITensorInfo *input, const ITensorInf
NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
: _asm_glue(), _memory_group(memory_manager), _input_im2col_kernel(), _input_interleave_kernel(), _reshape_weights(), _mm_kernel(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
- _output_col2im_kernel(), _original_weights(nullptr), _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _gemm_output(), _tmp_output(), _workspace(), _append_bias(false),
- _is_fully_connected_convolution(false), _are_weights_reshaped(false), _is_quantized(false), _is_interleaved(false)
+ _output_col2im_kernel(), _activationlayer_function(), _original_weights(nullptr), _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _gemm_output(), _tmp_output(),
+ _workspace(), _append_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false), _is_quantized(false), _is_interleaved(false), _is_activationlayer_enabled(false)
{
}
@@ -247,7 +249,7 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
}
void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
- const Size2D &dilation)
+ const Size2D &dilation, const ActivationLayerInfo &act_info)
{
// Perform validate step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
@@ -260,9 +262,10 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- Status status = validate_and_initialize_values(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(), conv_info, weights_info, dt, _append_bias, _are_weights_reshaped,
+ Status status = validate_and_initialize_values(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(), conv_info, weights_info, act_info, dt, _append_bias,
+ _are_weights_reshaped,
kernel_width, kernel_height,
- _is_fully_connected_convolution, _is_interleaved, _is_quantized,
+ _is_fully_connected_convolution, _is_interleaved, _is_quantized, _is_activationlayer_enabled,
mat_weights_cols, mat_weights_rows, conv_w, conv_h, dilation);
ARM_COMPUTE_ERROR_THROW_ON(status);
@@ -420,10 +423,16 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
{
_weights_reshaped.allocator()->allocate();
}
+
+ // Configure activation layer
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
}
Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const WeightsInfo &weights_info, const Size2D &dilation)
+ const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(output);
@@ -433,6 +442,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
bool is_fully_connected_convolution{};
bool is_interleaved{};
bool is_quantized{};
+ bool is_activationlayer_enabled{};
unsigned int kernel_width = 0;
unsigned int kernel_height = 0;
unsigned int mat_weights_cols = 0;
@@ -440,8 +450,8 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- Status status = validate_and_initialize_values(input, weights, biases, conv_info, weights_info, dt, append_bias, are_weights_reshaped, kernel_width, kernel_height,
- is_fully_connected_convolution, is_interleaved, is_quantized, mat_weights_cols, mat_weights_rows,
+ Status status = validate_and_initialize_values(input, weights, biases, conv_info, weights_info, act_info, dt, append_bias, are_weights_reshaped, kernel_width, kernel_height,
+ is_fully_connected_convolution, is_interleaved, is_quantized, is_activationlayer_enabled, mat_weights_cols, mat_weights_rows,
conv_w, conv_h, dilation);
const Size2D kernel_weights = Size2D(kernel_width, kernel_height);
@@ -536,6 +546,15 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixMultiplyKernel::validate(&im2_col_info, weights, &gemm_output_info, 1.f, is_interleaved, GEMMReshapeInfo()));
}
+ ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(&gemm_output_info, output, Size2D(conv_w, conv_h)));
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((output->dimension(0) != conv_w) || (output->dimension(1) != conv_h), "Output shape does not match the expected one");
+
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
+ }
+
return Status{};
}
@@ -591,6 +610,11 @@ void NEGEMMConvolutionLayer::run()
// Reshape output matrix
NEScheduler::get().schedule(&_output_col2im_kernel, Window::DimY);
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
+
_memory_group.release();
}
} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEWinogradLayer.cpp b/src/runtime/NEON/functions/NEWinogradLayer.cpp
index 0a344f0cae..f82845c7ad 100644
--- a/src/runtime/NEON/functions/NEWinogradLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradLayer.cpp
@@ -75,13 +75,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
} //namespace
NEWinogradLayer::NEWinogradLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _batched_gemm_kernel(nullptr), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _permute_input(),
- _permute_weights(), _permute_output(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(), _weights_hwio(), _input(), _weights(), _output(),
- _reshaped_kernel(false)
+ : _memory_group(std::move(memory_manager)), _batched_gemm_kernel(nullptr), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr),
+ _activationlayer_function(), _permute_input(), _permute_weights(), _permute_output(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(), _weights_hwio(),
+ _input(), _weights(), _output(), _reshaped_kernel(false), _is_activationlayer_enabled(false)
{
}
-void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
+void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, biases, output);
ARM_COMPUTE_UNUSED(conv_info);
@@ -217,6 +217,13 @@ void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, co
_transform_weights_kernel = std::move(transform_weights_kernel);
_transform_output_kernel = std::move(transform_output_kernel);
_batched_gemm_kernel = std::move(batched_gemm_kernel);
+
+ // Configure activation layer
+ _is_activationlayer_enabled = act_info.enabled();
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.configure(output, nullptr, act_info);
+ }
}
void NEWinogradLayer::run()
@@ -242,6 +249,12 @@ void NEWinogradLayer::run()
// Reorder the convolved output to ACL's NCHW ordering
_permute_output.run();
+
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function.run();
+ }
+
_memory_group.release();
}
diff --git a/tests/benchmark/CL/ConvolutionLayer.cpp b/tests/benchmark/CL/ConvolutionLayer.cpp
index bad644f8b8..ee25e12a0e 100644
--- a/tests/benchmark/CL/ConvolutionLayer.cpp
+++ b/tests/benchmark/CL/ConvolutionLayer.cpp
@@ -56,69 +56,79 @@ using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture<CLTensor, CLGEMMCo
TEST_SUITE(CL)
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::MobileNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
// 8 batches use about 1.8GB of memory which is too much for most devices!
REGISTER_FIXTURE_DATA_TEST_CASE(VGG16ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 1, 4 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2ConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
TEST_SUITE_END()
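Editor's note: on the test side, the extra ActivationInfo axis is injected with one more combine(); combine() forms the cartesian product with a single-value named dataset, so every fixture gains an activation parameter without multiplying the number of runs. A stripped-down sketch of the resulting grid, written in the same file's context, with a hypothetical case name and a fixed data type in place of the file's data_types list:

REGISTER_FIXTURE_DATA_TEST_CASE(ExampleConvolutionLayer, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
                                framework::dataset::combine(
                                    framework::dataset::combine(
                                        framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
                                                                    framework::dataset::make("ActivationInfo",
                                                                                             ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
                                        framework::dataset::make("DataType", DataType::F32)),
                                    framework::dataset::make("Batches", 1)));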
diff --git a/tests/benchmark/CL/DirectConvolutionLayer.cpp b/tests/benchmark/CL/DirectConvolutionLayer.cpp
index c7b07807e9..df63d3ac5d 100644
--- a/tests/benchmark/CL/DirectConvolutionLayer.cpp
+++ b/tests/benchmark/CL/DirectConvolutionLayer.cpp
@@ -49,58 +49,67 @@ namespace
const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
} // namespace
-using CLConvolutionLayerFixture = DirectConvolutionLayerFixture<CLTensor, CLDirectConvolutionLayer, CLAccessor>;
+using CLDirectConvolutionLayerFixture = DirectConvolutionLayerFixture<CLTensor, CLDirectConvolutionLayer, CLAccessor>;
TEST_SUITE(CL)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(VGG16DirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(VGG16DirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2DirectConvolutionLayer, CLConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2DirectConvolutionLayer, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
TEST_SUITE_END()
diff --git a/tests/benchmark/GLES_COMPUTE/ConvolutionLayer.cpp b/tests/benchmark/GLES_COMPUTE/ConvolutionLayer.cpp
index 4cb020caf4..c79e0156cf 100644
--- a/tests/benchmark/GLES_COMPUTE/ConvolutionLayer.cpp
+++ b/tests/benchmark/GLES_COMPUTE/ConvolutionLayer.cpp
@@ -55,64 +55,73 @@ using GCConvolutionLayerFixture = ConvolutionLayerFixture<GCTensor, GCConvolutio
TEST_SUITE(GC)
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
// 8 batches use about 1.8GB of memory which is too much for most devices!
REGISTER_FIXTURE_DATA_TEST_CASE(VGG16ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 1, 4 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2ConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
TEST_SUITE_END()
diff --git a/tests/benchmark/GLES_COMPUTE/DirectConvolutionLayer.cpp b/tests/benchmark/GLES_COMPUTE/DirectConvolutionLayer.cpp
index d319c41088..66fd8fc216 100644
--- a/tests/benchmark/GLES_COMPUTE/DirectConvolutionLayer.cpp
+++ b/tests/benchmark/GLES_COMPUTE/DirectConvolutionLayer.cpp
@@ -49,58 +49,68 @@ namespace
const auto data_types = framework::dataset::make("DataType", { DataType::F32, DataType::F16 });
} // namespace
-using GCConvolutionLayerFixture = DirectConvolutionLayerFixture<GCTensor, GCDirectConvolutionLayer, GCAccessor>;
+using GCDirectConvolutionLayerFixture = DirectConvolutionLayerFixture<GCTensor, GCDirectConvolutionLayer, GCAccessor>;
TEST_SUITE(GC)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(VGG16DirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(VGG16DirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(), framework::dataset::make("ActivationInfo",
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2DirectConvolutionLayer, GCConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2DirectConvolutionLayer, GCDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
TEST_SUITE_END()
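Every registration in this GLES benchmark follows the same recipe: the per-network shape dataset is folded together with the new ActivationInfo axis, then the data-type axis, then a batch axis, through nested framework::dataset::combine() calls. A minimal sketch of the recipe, reusing names visible above (data_types is the file-local alias; the RELU value is illustrative):

// Nested-combine recipe behind the registrations above; the ActivationInfo
// axis is the ingredient this patch threads in.
const auto alexnet_benchmark =
    framework::dataset::combine(
        framework::dataset::combine(
            framework::dataset::combine(
                datasets::AlexNetDirectConvolutionLayerDataset(),
                framework::dataset::make("ActivationInfo",
                                         ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
            data_types),
        framework::dataset::make("Batches", { 4, 8 }));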
diff --git a/tests/benchmark/NEON/ConvolutionLayer.cpp b/tests/benchmark/NEON/ConvolutionLayer.cpp
index a425d95a6e..e7f2788020 100644
--- a/tests/benchmark/NEON/ConvolutionLayer.cpp
+++ b/tests/benchmark/NEON/ConvolutionLayer.cpp
@@ -65,91 +65,133 @@ TEST_SUITE(NEON)
using NEWinogradLayerFixture = WinogradLayerFixture<Tensor, NEWinogradLayer, Accessor>;
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1WinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1WinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4WinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4WinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetWinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetWinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
#endif /* __aarch64__ */
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::MobileNetConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(LeNet5ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::LeNet5ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
// 8 batches use about 2GB of memory which is too much for most devices!
REGISTER_FIXTURE_DATA_TEST_CASE(VGG16ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 1, 2 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2ConvolutionLayer, NEGEMMConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(), data_types),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
#if defined(__aarch64__)
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1WinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1WinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4WinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4WinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetWinogradLayerDataset(), framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetWinogradLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
#endif /* __aarch64__ */
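What each fixture does with the extra axis is a single fused call. A sketch under the signature exercised by ConvolutionLayerFixture further down, where src, weights, biases and dst stand for already-initialised Tensor objects and the stride/padding values are illustrative:

// Convolution and ReLU in one configure()/run() on the NEON GEMM path;
// previously the activation needed its own function run.
NEGEMMConvolutionLayer conv;
conv.configure(&src, &weights, &biases, &dst,
               PadStrideInfo(1, 1, 0, 0), // stride and padding: illustrative
               WeightsInfo(),             // weights not pre-reshaped
               Size2D(1U, 1U),            // no dilation
               ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
conv.run();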
diff --git a/tests/benchmark/NEON/DirectConvolutionLayer.cpp b/tests/benchmark/NEON/DirectConvolutionLayer.cpp
index 8a17f3c03c..22f1bd2942 100644
--- a/tests/benchmark/NEON/DirectConvolutionLayer.cpp
+++ b/tests/benchmark/NEON/DirectConvolutionLayer.cpp
@@ -55,49 +55,69 @@ const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
#endif /* ARM_COMPUTE_ENABLE_F16 */
} // namespace
-using NEConvolutionLayerFixture = DirectConvolutionLayerFixture<Tensor, NEDirectConvolutionLayer, Accessor>;
+using NEDirectConvolutionLayerFixture = DirectConvolutionLayerFixture<Tensor, NEDirectConvolutionLayer, Accessor>;
TEST_SUITE(NEON)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetDirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetDirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1DirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1DirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4DirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4DirectConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetDirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(VGG16DirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(VGG16DirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::VGG16ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ data_types),
framework::dataset::make("Batches", { 1, 2 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2DirectConvolutionLayer, NEConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2DirectConvolutionLayer, NEDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2ConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ data_types),
framework::dataset::make("Batches", { 1, 4, 8 })));
TEST_SUITE_END()
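The direct-convolution path takes the activation immediately after the pad/stride info, matching the DirectConvolutionLayerFixture change below. A minimal sketch, with tensors assumed initialised and parameters illustrative:

// Direct convolution with a fused bounded ReLU; the call shape matches
// conv_layer.configure(&src, &weights, &biases, &dst, info, act_info) below.
NEDirectConvolutionLayer direct_conv;
direct_conv.configure(&src, &weights, &biases, &dst,
                      PadStrideInfo(1, 1, 0, 0),
                      ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));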
diff --git a/tests/benchmark/fixtures/ConvolutionLayerFixture.h b/tests/benchmark/fixtures/ConvolutionLayerFixture.h
index 7558b4c9a8..511daf73b3 100644
--- a/tests/benchmark/fixtures/ConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/ConvolutionLayerFixture.h
@@ -42,7 +42,8 @@ class ConvolutionLayerFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, DataType data_type, int batches)
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, ActivationLayerInfo act_info, DataType data_type,
+ int batches)
{
// Set batched in source and destination shapes
const unsigned int fixed_point_position = 4;
@@ -57,7 +58,7 @@ public:
dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
// Create and configure function
- conv_layer.configure(&src, &weights, &biases, &dst, info, WeightsInfo(), dilation);
+ conv_layer.configure(&src, &weights, &biases, &dst, info, WeightsInfo(), dilation, act_info);
// Allocate tensors
src.allocator()->allocate();
diff --git a/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h b/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
index e3289b7fbc..419f6dd822 100644
--- a/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
@@ -43,7 +43,8 @@ class DirectConvolutionLayerFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, DataType data_type, int batches)
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, ActivationLayerInfo act_info, DataType data_type,
+ int batches)
{
ARM_COMPUTE_UNUSED(dilation);
@@ -60,7 +61,7 @@ public:
dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
// Create and configure function
- conv_layer.configure(&src, &weights, &biases, &dst, info);
+ conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
// Allocate tensors
src.allocator()->allocate();
diff --git a/tests/benchmark/fixtures/WinogradLayerFixture.h b/tests/benchmark/fixtures/WinogradLayerFixture.h
index 31a1eb8e1c..0be535f4cc 100644
--- a/tests/benchmark/fixtures/WinogradLayerFixture.h
+++ b/tests/benchmark/fixtures/WinogradLayerFixture.h
@@ -42,7 +42,8 @@ class WinogradLayerFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, DataType data_type, int batches)
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, ActivationLayerInfo act_info, DataType data_type,
+ int batches)
{
ARM_COMPUTE_UNUSED(dilation);
@@ -59,7 +60,7 @@ public:
dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
// Create and configure function
- conv_layer.configure(&src, &weights, &biases, &dst, info);
+ conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
// Allocate tensors
src.allocator()->allocate();
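All three benchmark fixtures gain the same ActivationLayerInfo act_info parameter, slotted between the dilation and the data type so the dataset axes added above expand onto setup() positionally. For one AlexNet-style tuple the framework effectively makes a call like this (values illustrative only):

// One expanded dataset tuple spelled out; act_info sits between dilation
// and data_type, matching the combine() nesting order used above.
fixture.setup(TensorShape(227U, 227U, 3U),     // src_shape
              TensorShape(11U, 11U, 3U, 96U),  // weights_shape
              TensorShape(96U),                // biases_shape
              TensorShape(55U, 55U, 96U),      // dst_shape
              PadStrideInfo(4, 4, 0, 0),       // conv info
              Size2D(1U, 1U),                  // dilation
              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
              DataType::F32,
              1);                              // batches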
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index c50519b6ac..b50bb94bbb 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -60,6 +60,13 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::QS16,
DataType::QASYMM8,
});
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
} // namespace
TEST_SUITE(CL)
@@ -109,18 +116,18 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(false),
&weights_info.clone()->set_is_resizable(false),
&biases_info.clone()->set_is_resizable(false),
- &output_info.clone()->set_is_resizable(false), conv_info, WeightsInfo(), gpu_target);
+ &output_info.clone()->set_is_resizable(false), conv_info, WeightsInfo(), ActivationLayerInfo(), gpu_target);
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()
TEST_SUITE(GEMMConvolutionLayer)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()), CNNDataTypes),
- input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
+ CNNDataTypes),
+ ActivationFunctionsDataset),
+ input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- ARM_COMPUTE_UNUSED(dilation);
-
// Set fixed point position data type allowed
int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
@@ -142,7 +149,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create and configure function
CLGEMMConvolutionLayer conv;
- conv.configure(&src, &weights, &bias, &dst, info);
+ conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);
// Validate valid region
const ValidRegion src_valid_region = shape_to_valid_region(input_shape);
@@ -168,18 +175,22 @@ using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLA
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
- DataType::F16)))
+ DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F16)))
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
@@ -187,18 +198,22 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework:
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
- DataType::F32)))
+ DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F32)))
+ framework::dataset::make("DataType",
+ DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -212,20 +227,23 @@ using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointF
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+ framework::dataset::make("FractionalBits", 4, 7)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+ framework::dataset::make("FractionalBits", 4, 7)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
@@ -234,20 +252,23 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+ framework::dataset::make("FractionalBits", 1, 14)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType",
DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+ framework::dataset::make("FractionalBits", 1, 14)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
@@ -258,20 +279,30 @@ TEST_SUITE_END()
template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
+const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+});
+
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
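The quantized suite reuses the float pattern with the narrower QuantizedActivationFunctionsDataset (identity, RELU and LU_BOUNDED_RELU only). Pulled out of the Configuration test above, a single fused CL GEMM configuration reads as below; shapes are chosen to be self-consistent (3x3 kernel, stride (2,1), no padding) and create_tensor<> is the test-utils helper these files already use:

// Fused CL GEMM convolution with a bounded ReLU, mirroring the
// Configuration test body above.
CLTensor src     = create_tensor<CLTensor>(TensorShape(23U, 27U, 5U), DataType::F32, 1, 0);
CLTensor weights = create_tensor<CLTensor>(TensorShape(3U, 3U, 5U, 21U), DataType::F32, 1, 0);
CLTensor bias    = create_tensor<CLTensor>(TensorShape(21U), DataType::F32, 1, 0);
CLTensor dst     = create_tensor<CLTensor>(TensorShape(11U, 25U, 21U), DataType::F32, 1, 0);
CLGEMMConvolutionLayer conv;
conv.configure(&src, &weights, &bias, &dst,
               PadStrideInfo(2, 1, 0, 0),
               WeightsInfo(),
               Size2D(1U, 1U),
               ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f));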
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index dadae2227b..532d04b431 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -113,7 +113,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(false),
&weights_info.clone()->set_is_resizable(false),
&biases_info.clone()->set_is_resizable(false),
- &output_info.clone()->set_is_resizable(false), conv_info, WeightsInfo(), gpu_target, dilation);
+ &output_info.clone()->set_is_resizable(false), conv_info, WeightsInfo(), ActivationLayerInfo(), gpu_target, dilation);
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()
@@ -171,18 +171,18 @@ using CLGEMMDilatedConvolutionLayerFixture = ConvolutionValidationFixture<CLTens
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
@@ -190,18 +190,18 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture<half>, fra
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -215,20 +215,24 @@ using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixe
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
@@ -237,20 +241,24 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",
+ DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fixed);
@@ -263,18 +271,22 @@ using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuant
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
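get_convolution_method() now takes the ActivationLayerInfo just before the GPU target, with the optional dilation trailing; compare the two patched call sites above. A standalone query, written with the same four-argument TensorInfo form the Validate tests use (shapes and target are illustrative):

// Heuristic query with the new parameter order:
// ..., conv_info, WeightsInfo(), ActivationLayerInfo(), gpu_target[, dilation].
TensorInfo input(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0);
TensorInfo weights(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0);
TensorInfo biases(TensorShape(21U), 1, DataType::F32, 0);
TensorInfo output(TensorShape(11U, 25U, 21U, 4U), 1, DataType::F32, 0);
ConvolutionMethod method = CLConvolutionLayer::get_convolution_method(
    &input, &weights, &biases, &output,
    PadStrideInfo(2, 1, 0, 0), WeightsInfo(), ActivationLayerInfo(),
    GPUTarget::MIDGARD, Size2D(1U, 1U));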
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index bf8b4057ee..4564c647b3 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -73,6 +73,14 @@ const auto data_fixed_point = combine(datasets::TinyDirectConvolutionShapes(),
combine(framework::dataset::make("PadY", 0, 2),
framework::dataset::make("KernelSize", { 3 })))),
framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
+/** Activation function dataset */
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
} // namespace
TEST_SUITE(CL)
@@ -82,7 +90,7 @@ TEST_SUITE(DirectConvolutionLayer)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width
@@ -143,10 +151,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
PadStrideInfo(1, 1, 0, 0),
PadStrideInfo(1, 1, 0, 0),
})),
+ framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})),
framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false, false, true })),
- input_info, weights_info, biases_info, output_info, conv_info, expected)
+ input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
{
- bool is_valid = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info));
+ bool is_valid = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
@@ -159,7 +171,7 @@ using CLDirectConvolutionValidationWithTensorShapesFixture = DirectConvolutionVa
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(data, framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(data, framework::dataset::make("DataType", DataType::F16)), ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, tolerance_num);
@@ -167,7 +179,8 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixture<half>, framework::Da
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(data, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(data, framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -175,8 +188,9 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixture<float>, framework::D
TEST_SUITE_END()
TEST_SUITE(FP32_CustomDataset)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionValidationWithTensorShapesFixture<float>, framework::DatasetMode::ALL, combine(datasets::DirectConvolutionLayerDataset(),
- framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionValidationWithTensorShapesFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::DirectConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -189,8 +203,10 @@ using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFix
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(data_fixed_point, framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 2, 7)))
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType",
+ DataType::QS8)),
+ framework::dataset::make("FractionalBits", 2, 7)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qs8);
@@ -198,8 +214,10 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, f
TEST_SUITE_END()
TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(data_fixed_point, framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 2, 15)))
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType",
+ DataType::QS16)),
+ framework::dataset::make("FractionalBits", 2, 15)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qs16);
@@ -212,10 +230,17 @@ using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuan
template <typename T>
using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
+const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(data, framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) })))
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(data, framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) })),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -223,9 +248,10 @@ FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, f
TEST_SUITE_END()
TEST_SUITE(QASYMM8_CustomDataset)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(datasets::DirectConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionValidationWithTensorShapesQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::DirectConvolutionLayerDataset(),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) })),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
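Because the static validate() now sees the activation too, an unsupported fusion can be rejected before any tensor is allocated. A sketch with self-consistent shapes (27x13x2 input, 3x3 kernel, unit stride, no padding):

// Pre-flight validation of a fused direct convolution; a failed Status
// converts to false, which is exactly what the Validate test checks.
TensorInfo input(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0);
TensorInfo weights(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0);
TensorInfo biases(TensorShape(4U), 1, DataType::F32, 0);
TensorInfo output(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0);
const bool is_valid = bool(CLDirectConvolutionLayer::validate(
    &input, &weights, &biases, &output,
    PadStrideInfo(1, 1, 0, 0),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)));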
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 07a52f8ebc..9aba8f776c 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -373,14 +373,17 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
// *INDENT-ON*
using CLWinogradConvolutionLayerFixture = WinogradConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float>;
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
- framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
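The Winograd cases pass a default-constructed ActivationLayerInfo, which reports enabled() == false, so the new argument is plumbed through without fusing any activation on this path yet:

// A default ActivationLayerInfo leaves fusion off: the convolution output
// passes through unchanged.
ActivationLayerInfo no_act;
const bool fused = no_act.enabled(); // false for the Winograd runs above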
diff --git a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
index aaa0d159be..8808d82d34 100644
--- a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
@@ -53,13 +53,21 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
// DataType::F32,
});
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
+});
} // namespace
TEST_SUITE(GC)
TEST_SUITE(ConvolutionLayer)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()), CNNDataTypes),
- input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
+ CNNDataTypes),
+ ActivationFunctionsDataset),
+ input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
// Set fixed point position data type allowed
int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
@@ -82,7 +90,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create and configure function
GCConvolutionLayer conv;
- conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation);
+ conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);
// Validate valid region
const ValidRegion src_valid_region = shape_to_valid_region(input_shape);
@@ -108,18 +116,20 @@ using GCConvolutionLayerFixture = ConvolutionValidationFixture<GCTensor, GCAcces
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, GCConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, GCConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType",
- DataType::F16)))
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, GCConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, GCConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType",
- DataType::F16)))
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_f16, tolerance_num);
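As a usage note, the widened GCConvolutionLayer::configure() signature means no separate activation pass is configured after the convolution. A hedged sketch, assuming src, weights, bias, dst, info and dilation are prepared as in the Configuration test above:

// Sketch only: identifiers mirror the test body above.
GCConvolutionLayer conv;
const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f);
conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);
// The activation now runs inside the convolution function itself.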
diff --git a/tests/validation/GLES_COMPUTE/DirectConvolutionLayer.cpp b/tests/validation/GLES_COMPUTE/DirectConvolutionLayer.cpp
index 153b060757..47c35f9c6f 100644
--- a/tests/validation/GLES_COMPUTE/DirectConvolutionLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/DirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -68,6 +68,13 @@ const auto data = combine(datasets::SmallDirectConvolutionShapes(),
combine(framework::dataset::make("PadY", 0, 2),
framework::dataset::make("KernelSize", { 3, 5 })))),
framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
+/** Activation function dataset */
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+});
} // namespace
TEST_SUITE(GC)
@@ -80,7 +87,8 @@ using GCDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<GCTen
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Run, GCDirectConvolutionLayerFixture<half_float::half>, framework::DatasetMode::ALL, combine(data, framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Run, GCDirectConvolutionLayerFixture<half_float::half>, framework::DatasetMode::ALL, combine(combine(data, framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_fp16, tolerance_num);
@@ -88,7 +96,8 @@ FIXTURE_DATA_TEST_CASE(Run, GCDirectConvolutionLayerFixture<half_float::half>, f
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Run, GCDirectConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(data, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, GCDirectConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(data, framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_fp32);
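The activation entries used across these datasets map to simple clamping or squashing formulas. As a reference sketch (my reading of the a/b parameters; the authoritative definitions live in ActivationLayerInfo):

#include <algorithm>
#include <cmath>

// Hedged formula sketch for the functions named in these datasets.
inline float relu(float x)                              { return std::max(0.0f, x); }                 // RELU
inline float bounded_relu(float x, float a)             { return std::min(a, std::max(0.0f, x)); }    // BOUNDED_RELU(a)
inline float lu_bounded_relu(float x, float a, float b) { return std::min(a, std::max(b, x)); }       // LU_BOUNDED_RELU(a, b)
inline float logistic(float x)                          { return 1.0f / (1.0f + std::exp(-x)); }      // LOGISTIC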
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 313e4bc4d6..27216af6d1 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -65,6 +65,12 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::QS16,
DataType::QASYMM8,
});
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
+});
} // namespace
TEST_SUITE(NEON)
@@ -112,9 +118,11 @@ template <typename T>
using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradLayer, T>;
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
- datasets::SmallWinogradConvolutionLayer5x5Dataset()),
- framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
+ datasets::SmallWinogradConvolutionLayer5x5Dataset()),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -125,11 +133,12 @@ TEST_SUITE_END()
TEST_SUITE(GEMMConvolutionLayer)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()), CNNDataTypes),
- input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
+ CNNDataTypes),
+ framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
+input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- ARM_COMPUTE_UNUSED(dilation);
-
// Set a fixed point position if the data type allows it
int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
@@ -151,7 +160,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create and configure function
NEGEMMConvolutionLayer conv;
- conv.configure(&src, &weights, &bias, &dst, info);
+ conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);
// Validate valid region
const ValidRegion src_valid_region = shape_to_valid_region(input_shape);
@@ -178,16 +187,18 @@ using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Acces
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -196,16 +207,18 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -219,18 +232,20 @@ using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointF
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+ framework::dataset::make("FractionalBits", 4, 7)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+ framework::dataset::make("FractionalBits", 4, 7)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
@@ -239,18 +254,20 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+ framework::dataset::make("FractionalBits", 1, 14)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+ framework::dataset::make("FractionalBits", 1, 14)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
@@ -261,20 +278,28 @@ TEST_SUITE_END()
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
+const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
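The quantized runs get their own QuantizedActivationFunctionsDataset; a plausible reading (an assumption, not stated in the patch) is that LU_BOUNDED_RELU with an upper bound of 6.f stands in for the ReLU6 clamp common in quantized mobile networks. A hedged sketch of the fused call with that descriptor, with tensors set up as in the fixtures:

// Sketch only: identifiers from this patch.
NEGEMMConvolutionLayer conv;
const ActivationLayerInfo relu6(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f);
conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, relu6);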
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index c0a72ec8cb..1e8c19fc5e 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -163,16 +163,18 @@ using NEGEMMDilatedConvolutionLayerFixture = ConvolutionValidationFixture<Tensor
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -181,16 +183,18 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -204,18 +208,22 @@ using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixe
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
@@ -224,18 +232,22 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
@@ -248,18 +260,22 @@ using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuant
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
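Note that the dilated tests add only a single, default-constructed entry. A default ActivationLayerInfo reports enabled() == false (see the fixture changes below), so these tests stay on the pure-convolution numerical path while still compiling against the widened signatures. The added dataset boils down to:

// One disabled entry: no activation is applied, but the parameter plumbing is still tested.
const auto NoActivationDataset =
    framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo());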
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index 57e030c349..b6f9f62ff0 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -86,6 +86,14 @@ const auto data_qs16 = combine(datasets::TinyDirectConvolutionShapes(),
combine(framework::dataset::make("PadY", 0),
combine(framework::dataset::make("KernelSize", 1),
framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))))));
+/** Activation function dataset */
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
} // namespace
TEST_SUITE(NEON)
@@ -93,7 +101,7 @@ TEST_SUITE(DirectConvolutionLayer)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width
@@ -144,10 +152,15 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
PadStrideInfo(1, 1, 0, 0),
PadStrideInfo(1, 1, 0, 0),
})),
+ framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})),
framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false })),
- input_info, weights_info, biases_info, output_info, conv_info, expected)
+ input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
{
- bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info));
+ bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
@@ -161,7 +174,8 @@ using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<Tenso
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(data_f32, framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(data_f32, framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp16);
@@ -170,7 +184,8 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(data_f32, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(data_f32, framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp32);
@@ -181,11 +196,19 @@ TEST_SUITE_END()
template <typename T>
using NEDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
+const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+});
+
TEST_SUITE(Quantized)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(data_qs8, framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(data_qs8, framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qs);
@@ -194,8 +217,9 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// We test for fixed point precision [4,13]
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(data_qs16, framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 4, 14)))
+FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(data_qs16, framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 4, 14)),
+ QuantizedActivationFunctionsDataset))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qs);
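For static shape and type checking, the same descriptor is threaded through validate(). A hedged pre-flight sketch mirroring the Validate test above:

// Sketch: query support for a fused direct convolution before allocating anything.
const bool is_valid = bool(NEDirectConvolutionLayer::validate(
    &input_info, &weights_info, &biases_info, &output_info, conv_info,
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)));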
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 3d073e3f79..1bcffed526 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -33,6 +33,7 @@
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Utils.h"
@@ -55,7 +56,7 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
_data_type = data_type;
_is_quantized = is_data_type_quantized_asymmetric(data_type);
@@ -63,8 +64,8 @@ public:
_fractional_bits = fractional_bits;
_quantization_info = quantization_info;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
}
protected:
@@ -98,7 +99,7 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- bool reshape_weights, const Size2D &dilation)
+ bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
{
const bool is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && _data_type == DataType::F32;
@@ -140,7 +141,7 @@ protected:
// Create and configure function
FunctionType conv;
- conv.configure(&src, &weights, &bias, &dst, info, weights_info, dilation);
+ conv.configure(&src, &weights, &bias, &dst, info, weights_info, dilation, act_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -210,7 +211,7 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- const Size2D &dilation)
+ const Size2D &dilation, const ActivationLayerInfo act_info)
{
// Create reference
SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
@@ -222,7 +223,9 @@ protected:
fill(weights, 1);
fill(bias, 2);
- return reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation);
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation),
+ act_info) :
+ reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation);
}
TensorType _target{};
@@ -283,10 +286,12 @@ class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<
{
public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type)
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
+ ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, 0,
- QuantizationInfo());
+ QuantizationInfo(),
+ act_info);
}
};
@@ -296,10 +301,11 @@ class ConvolutionValidationFixedPointFixture : public ConvolutionValidationGener
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
- int fractional_bits)
+ int fractional_bits,
+ ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, fractional_bits,
- QuantizationInfo());
+ QuantizationInfo(), act_info);
}
};
@@ -309,10 +315,10 @@ class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGeneri
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
- QuantizationInfo quantization_info)
+ QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, 0,
- quantization_info);
+ quantization_info, act_info);
}
};
} // namespace validation
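The fixture-side pattern above is worth spelling out: the fused target is checked against an unfused reference, computed as convolution followed (when enabled) by activation using the reference helpers included at the top of the file. A condensed sketch of that composition:

// Hedged sketch of the reference used to validate the fused backends.
template <typename T>
SimpleTensor<T> fused_conv_reference(const SimpleTensor<T> &src, const SimpleTensor<T> &weights,
                                     const SimpleTensor<T> &bias, const TensorShape &output_shape,
                                     const PadStrideInfo &info, const Size2D &dilation,
                                     const ActivationLayerInfo &act_info)
{
    SimpleTensor<T> out = reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation);
    return act_info.enabled() ? reference::activation_layer<T>(out, act_info) : out;
}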
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index fef9d2dc6e..ef7721dd5e 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -49,7 +49,7 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
_fractional_bits = fractional_bits;
_quantization_info = quantization_info;
@@ -61,13 +61,13 @@ public:
const TensorShape output_shape = get_output_shape(input_shape, weights_shape, info);
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
}
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
ARM_COMPUTE_UNUSED(dilation);
@@ -77,8 +77,8 @@ public:
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
}
protected:
@@ -112,7 +112,7 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info);
@@ -122,7 +122,7 @@ protected:
// Create and configure function
FunctionType conv;
- conv.configure(&src, &weights, &bias, &dst, info);
+ conv.configure(&src, &weights, &bias, &dst, info, act_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -152,7 +152,7 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
// Create reference
SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
@@ -164,7 +164,9 @@ protected:
fill(weights, 1);
fill(bias, 2);
- return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info),
+ act_info) :
+ reference::convolution_layer<T>(src, weights, bias, output_shape, info);
}
TensorType _target{};
@@ -194,9 +196,10 @@ class DirectConvolutionValidationFixture : public DirectConvolutionValidationGen
{
public:
template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, QuantizationInfo());
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, QuantizationInfo(),
+ act_info);
}
};
@@ -205,10 +208,11 @@ class DirectConvolutionValidationFixedPointFixture : public DirectConvolutionVal
{
public:
template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits)
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits,
+ ActivationLayerInfo act_info)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, fractional_bits,
- QuantizationInfo());
+ QuantizationInfo(), act_info);
}
};
@@ -217,9 +221,11 @@ class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionVali
{
public:
template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
+ ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, quantization_info);
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, quantization_info,
+ act_info);
}
};
@@ -229,9 +235,10 @@ class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public Direc
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, QuantizationInfo quantization_info)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, quantization_info);
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, quantization_info,
+ act_info);
}
};
@@ -241,9 +248,10 @@ class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolut
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type)
+ DataType data_type, ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, QuantizationInfo());
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, QuantizationInfo(),
+ act_info);
}
};
diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradLayerFixture.h
index a86f24f35e..5210cbf720 100644
--- a/tests/validation/fixtures/WinogradLayerFixture.h
+++ b/tests/validation/fixtures/WinogradLayerFixture.h
@@ -33,6 +33,7 @@
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/Winograd.h"
@@ -52,12 +53,12 @@ class WinogradConvolutionLayerValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type)
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info)
{
ARM_COMPUTE_UNUSED(dilation);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
}
protected:
@@ -82,7 +83,7 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type)
+ DataType data_type, ActivationLayerInfo act_info)
{
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
@@ -92,7 +93,7 @@ protected:
// Create and configure function
FunctionType conv;
- conv.configure(&src, &weights, &bias, &dst, info);
+ conv.configure(&src, &weights, &bias, &dst, info, act_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -122,7 +123,7 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type)
+ DataType data_type, ActivationLayerInfo act_info)
{
// Create reference
SimpleTensor<T> src{ input_shape, data_type, 1 };
@@ -134,7 +135,8 @@ protected:
fill(weights, 1, -1.f, 1.f);
fill(bias, 2, -1.f, 1.f);
- return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info), act_info) : reference::convolution_layer<T>(src, weights, bias,
+ output_shape, info);
}
TensorType _target{};
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 811fb7b1e8..9ac4b343b3 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -341,7 +341,10 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo:
inline std::string to_string(const arm_compute::ActivationLayerInfo &info)
{
std::stringstream str;
- str << info.activation();
+ if(info.enabled())
+ {
+ str << info.activation();
+ }
return str.str();
}
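A small behavioural note on the TypePrinter change: to_string() now returns an empty string for a disabled descriptor, so generated test-case names only gain an activation suffix when an activation is actually fused. Sketch (the exact enum spelling in the output is the library's; shown here as an assumption):

to_string(ActivationLayerInfo());                                               // "" (disabled: nothing printed)
to_string(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));  // e.g. "RELU"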