author     Giorgio Arena <giorgio.arena@arm.com>     2020-02-11 17:21:31 +0000
committer  SiCong Li <sicong.li@arm.com>             2020-04-03 08:51:12 +0000
commit     8b2a7d3aa119e7f1d6a03690d05eb27c5d178b9f (patch)
tree       9fb4f4f328f7a17de13bef109834e8ad8a21d2ee /src
parent     15e4d876643c37e1db36ee1190ec52319479ffaf (diff)
COMPMID-3101 Fuse activation with floating point elementwise operation layers in CL
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: I1693f8664ba7c0dc8c076bbe7365cef1e667bd25
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2718
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/cl_kernels/activation_float_helpers.h |  4
-rw-r--r--  src/core/CL/cl_kernels/elementwise_operation.cl | 12
-rw-r--r--  src/core/CL/cl_kernels/pixelwise_mul_float.cl | 15
-rw-r--r--  src/core/CL/kernels/CLElementwiseOperationKernel.cpp | 28
-rw-r--r--  src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp | 38
-rw-r--r--  src/graph/mutators/NodeFusionMutator.cpp | 18
-rw-r--r--  src/graph/nodes/EltwiseLayerNode.cpp | 10
-rw-r--r--  src/runtime/CL/functions/CLElementwiseOperations.cpp | 56
-rw-r--r--  src/runtime/CL/functions/CLPixelWiseMultiplication.cpp | 16
-rwxr-xr-x  src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp |  6
-rwxr-xr-x  src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp |  3
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticAddition.cpp |  6
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticSubtraction.cpp |  6
-rw-r--r--  src/runtime/NEON/functions/NEElementwiseOperators.cpp | 30
-rw-r--r--  src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp | 14
15 files changed, 177 insertions, 85 deletions
diff --git a/src/core/CL/cl_kernels/activation_float_helpers.h b/src/core/CL/cl_kernels/activation_float_helpers.h
index 8590f25635..a1e742da0d 100644
--- a/src/core/CL/cl_kernels/activation_float_helpers.h
+++ b/src/core/CL/cl_kernels/activation_float_helpers.h
@@ -72,6 +72,6 @@
// Identity Activation
#define identity_op(DATA_TYPE, x, A_VAL, B_VAL) (x)
-#define OP(op, DATA_TYPE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, x, A_VAL, B_VAL)
+#define ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, x, A_VAL, B_VAL)
-#define ACTIVATION(op, DATA_TYPE, x, A_VAL, B_VAL) OP(op, DATA_TYPE, x, A_VAL, B_VAL)
+#define ACTIVATION(op, DATA_TYPE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)
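The helper's generic OP macro is renamed to ACT_OP, which avoids a clash with the OP macro of elementwise_operation.cl (the arithmetic operation selected at compile time) now that the kernel includes this header. A minimal C++ sketch of the same two-level, token-pasting dispatch, using stand-in macros rather than the actual CL helpers:

// Stand-in for the activation dispatch in activation_float_helpers.h.
#include <algorithm>
#include <cstdio>

#define relu_op(DATA_TYPE, x, A_VAL, B_VAL) (std::max((DATA_TYPE)0, (x)))
#define lu_brelu_op(DATA_TYPE, x, A_VAL, B_VAL) (std::min((A_VAL), std::max((B_VAL), (x))))
#define identity_op(DATA_TYPE, x, A_VAL, B_VAL) (x)

// Renamed from OP to ACT_OP so it cannot collide with the elementwise kernel's
// OP macro, which names the arithmetic operation (ADD, SUB, ...).
#define ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, x, A_VAL, B_VAL)
// The extra level of indirection lets the 'op' argument itself be a macro
// (e.g. -DACTIVATION_TYPE=relu) and still be expanded before token pasting.
#define ACTIVATION(op, DATA_TYPE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)

int main()
{
    const float sum = -1.5f; // pretend this is an elementwise result
    std::printf("relu     : %f\n", ACTIVATION(relu, float, sum, 0.0f, 0.0f));
    std::printf("lu_brelu : %f\n", ACTIVATION(lu_brelu, float, sum, 6.0f, 0.0f));
    std::printf("identity : %f\n", ACTIVATION(identity, float, sum, 0.0f, 0.0f));
}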
diff --git a/src/core/CL/cl_kernels/elementwise_operation.cl b/src/core/CL/cl_kernels/elementwise_operation.cl
index 42d6d33e03..9b87b526f7 100644
--- a/src/core/CL/cl_kernels/elementwise_operation.cl
+++ b/src/core/CL/cl_kernels/elementwise_operation.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,6 +44,11 @@
#define OP_FUN_NAME(op) OP_FUN_NAME_STR(op)
#if defined(OP) && defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE)
+
+#if defined(ACTIVATION_TYPE)
+#include "activation_float_helpers.h"
+#endif // defined(ACTIVATION_TYPE)
+
/** This function executes an element-wise operation among two tensors.
*
* @attention The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT:
@@ -94,7 +99,12 @@ __kernel void OP_FUN_NAME(OP)(
in_b = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN2 *)in2.ptr), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
// Calculate and store result
+#if defined(ACTIVATION_TYPE)
+ VSTORE(VEC_SIZE)
+ (ACTIVATION(ACTIVATION_TYPE, DATA_TYPE_OUT, CONVERT(OP(in_a, in_b), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), A_VAL, B_VAL), 0, (__global DATA_TYPE_OUT *)out.ptr);
+#else // defined(ACTIVATION_TYPE)
VSTORE(VEC_SIZE)
(OP(in_a, in_b), 0, (__global DATA_TYPE_OUT *)out.ptr);
+#endif // defined(ACTIVATION_TYPE)
}
#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE) */
diff --git a/src/core/CL/cl_kernels/pixelwise_mul_float.cl b/src/core/CL/cl_kernels/pixelwise_mul_float.cl
index d0e04b2ffe..aad4becc1a 100644
--- a/src/core/CL/cl_kernels/pixelwise_mul_float.cl
+++ b/src/core/CL/cl_kernels/pixelwise_mul_float.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,6 +31,11 @@
#define CONVERT_OP_FLOAT(x, type, round) CONVERT_OP_FLOAT_STR(x, type, round)
#if defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_RES) && defined(DATA_TYPE_OUT)
+
+#if defined(ACTIVATION_TYPE)
+#include "activation_float_helpers.h"
+#endif // defined(ACTIVATION_TYPE)
+
/** Performs a pixelwise multiplication with float scale of either integer or float inputs.
*
* @attention The inputs and output data types need to be passed at compile time using -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT:
@@ -91,8 +96,12 @@ __kernel void pixelwise_mul_float(
res = CONVERT_OP_FLOAT(CONVERT_OP_FLOAT((convert_float16(in1_data * in2_data) * scale), VEC_DATA_TYPE(DATA_TYPE_RES, 16), ROUND), VEC_DATA_TYPE(DATA_TYPE_OUT, 16), ROUND);
#endif /* DATA_TYPE_FLOAT */
+#if defined(ACTIVATION_TYPE)
+ vstore16(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE_OUT, res, A_VAL, B_VAL), 0, (__global DATA_TYPE_OUT *)out.ptr);
+#else // defined(ACTIVATION_TYPE)
// Store result
vstore16(res, 0, (__global DATA_TYPE_OUT *)out.ptr);
+#endif // defined(ACTIVATION_TYPE)
}
#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_RES) && defined(DATA_TYPE_OUT) */
@@ -140,6 +149,10 @@ __kernel void pixelwise_mul_complex(
// Perform complex multiplication
float2 res = { vin1.x *vin2.x - vin1.y * vin2.y, vin1.x *vin2.y + vin2.x * vin1.y };
+#if defined(ACTIVATION_TYPE)
+ vstore2(ACTIVATION(ACTIVATION_TYPE, float, res, A_VAL, B_VAL), 0, (__global float *)out.ptr);
+#else // defined(ACTIVATION_TYPE)
// Store result
vstore2(res, 0, (__global float *)out.ptr);
+#endif // defined(ACTIVATION_TYPE)
}
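Both float kernels follow the same pattern: the fused activation is compiled in only when ACTIVATION_TYPE is defined, so existing builds are untouched. A self-contained sketch, with plain C++ scalars standing in for the CL vector types:

#include <cstdio>

#define relu_op(x, A_VAL, B_VAL) ((x) > 0.0f ? (x) : 0.0f)
#define ACT_OP(op, x, A_VAL, B_VAL) op##_op(x, A_VAL, B_VAL)
#define ACTIVATION(op, x, A_VAL, B_VAL) ACT_OP(op, x, A_VAL, B_VAL)

// Build with e.g. -DACTIVATION_TYPE=relu -DA_VAL=0.0f -DB_VAL=0.0f to take the fused path.
float multiply_and_store(float in1, float in2, float scale)
{
    const float res = in1 * in2 * scale;
#if defined(ACTIVATION_TYPE)
    return ACTIVATION(ACTIVATION_TYPE, res, A_VAL, B_VAL); // activation applied before the single store
#else
    return res; // original behaviour: store the product unchanged
#endif
}

int main()
{
    std::printf("%f\n", multiply_and_store(2.0f, -3.0f, 1.0f));
}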
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index 1ac35a286f..0f2e26f186 100644
--- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
+++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
@@ -231,7 +231,7 @@ std::pair<Status, Window> validate_and_configure_window_for_division(ITensorInfo
} // namespace
CLElementwiseOperationKernel::CLElementwiseOperationKernel()
- : _input1(nullptr), _input2(nullptr), _output(nullptr)
+ : _act_info(), _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}
@@ -256,6 +256,12 @@ void CLElementwiseOperationKernel::configure_common(const ICLTensor *input1, con
// Set kernel build options
CLBuildOptions build_opts = generate_build_options(*input1->info(), *input2->info(), *output->info());
+ if(_act_info.enabled())
+ {
+ build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(_act_info.activation())));
+ build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(_act_info.a()));
+ build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(_act_info.b()));
+ }
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
@@ -320,19 +326,23 @@ BorderSize CLElementwiseOperationKernel::border_size() const
/** Arithmetic operations with saturation*/
-void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy)
+void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy,
+ const ActivationLayerInfo &act_info)
{
- _policy = policy;
- _op = op;
+ _policy = policy;
+ _op = op;
+ _act_info = act_info;
configure_common(input1, input2, output);
}
-Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy)
+Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
+ const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(op, policy);
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
return Status{};
}
@@ -369,13 +379,14 @@ std::string CLSaturatedArithmeticOperationKernel::name()
/** Arithmetic operations*/
-void CLArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
- _op = op;
+ _op = op;
+ _act_info = act_info;
configure_common(input1, input2, output);
}
-Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
if(op == ArithmeticOperation::DIV || op == ArithmeticOperation::POWER)
@@ -389,6 +400,7 @@ Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITens
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);
}
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
return Status{};
}
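On the host side, configure_common() forwards the fused activation to the kernel as three build options; the CL source only compiles the fused path when they are present. A simplified stand-in for that plumbing (plain std::string instead of CLBuildOptions, std::to_string instead of the full-precision helpers):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for ActivationLayerInfo, reduced to what the kernel needs.
struct ActInfo
{
    bool        enabled = false;
    std::string name;             // lower-case activation name, e.g. "relu", "tanh"
    float       a = 0.f, b = 0.f; // the activation's a/b parameters
};

std::vector<std::string> make_build_options(const ActInfo &act)
{
    std::vector<std::string> opts;
    if(act.enabled)
    {
        // Mirrors the new block in configure_common(): no defines, no fused code.
        opts.push_back("-DACTIVATION_TYPE=" + act.name);
        opts.push_back("-DA_VAL=" + std::to_string(act.a));
        opts.push_back("-DB_VAL=" + std::to_string(act.b));
    }
    return opts;
}

int main()
{
    for(const auto &o : make_build_options({ true, "lu_brelu", 6.f, 0.f }))
    {
        std::cout << o << '\n';
    }
}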
diff --git a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
index d31c1de402..ff5afa3d95 100644
--- a/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
+++ b/src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp
@@ -46,7 +46,7 @@ namespace
constexpr unsigned int num_elems_processed_per_iteration = 16;
Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(overflow_policy);
ARM_COMPUTE_UNUSED(rounding_policy);
@@ -64,6 +64,7 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
DataType::S16, DataType::QSYMM16, DataType::F16,
DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
@@ -148,11 +149,11 @@ CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
}
void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(),
- scale, overflow_policy, rounding_policy));
+ scale, overflow_policy, rounding_policy, act_info));
// Configure kernel window
auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
@@ -227,6 +228,12 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->info()->data_type()), "-DWRAP", "-DSATURATE");
build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
build_opts.add_option("-DDATA_TYPE_RES=" + compute_type);
+ if(act_info.enabled())
+ {
+ build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
+ build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
+ build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
+ }
}
// Create kernel
@@ -248,10 +255,10 @@ void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const I
}
Status CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy, act_info));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
return Status{};
@@ -311,7 +318,7 @@ namespace
{
constexpr unsigned int num_elems_processed_per_iteration_complex = 1;
-Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32);
@@ -319,6 +326,7 @@ Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *
const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
// Validate in case of configured output
if(output->total_size() > 0)
@@ -364,10 +372,10 @@ CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
{
}
-void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
+void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info(), act_info));
// Configure kernel window
auto win_config = validate_and_configure_window_complex(input1->info(), input2->info(), output->info());
@@ -377,16 +385,24 @@ void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1,
_input2 = input2;
_output = output;
+ CLBuildOptions build_opts;
+ if(act_info.enabled())
+ {
+ build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
+ build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
+ build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
+ }
+
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("pixelwise_mul_complex"));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("pixelwise_mul_complex", build_opts.options()));
ICLKernel::configure_internal(win_config.second);
}
-Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output, act_info));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
return Status{};
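Every CL validate() path now carries the same guard: a fused activation is only accepted when the output data type is floating point. A minimal sketch of that check, with simplified types in place of ITensorInfo and the ARM_COMPUTE_RETURN_ERROR_ON macro:

#include <stdexcept>

enum class DataType { F16, F32, QASYMM8, QSYMM16, S16 };

inline bool is_float(DataType dt)
{
    return dt == DataType::F16 || dt == DataType::F32;
}

// Reject the configuration before any kernel is built, so the error surfaces
// at validate()/configure() time rather than as wrong results on the device.
void check_fused_activation(bool act_enabled, DataType output_type)
{
    if(act_enabled && !is_float(output_type))
    {
        throw std::runtime_error("fused activation requires a floating point output");
    }
}

int main()
{
    check_fused_activation(true, DataType::F32);        // accepted
    // check_fused_activation(true, DataType::QASYMM8); // would throw
}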
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index 273e6ce746..ae53b8ff75 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -294,13 +294,20 @@ IGraphMutator::MutationType NodeFusionMutator::type() const
void NodeFusionMutator::mutate(Graph &g)
{
// Supported activations when fusing
- const std::set<Activation> supported_fused_activations = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
+ const std::set<Activation> supported_fused_activations_conv = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
+ const std::set<Activation> supported_fused_activations_eltwise = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU,
+ Activation::TANH, Activation::LOGISTIC
+ };
// Preconditions
auto empty_prec = [](INode &)
{
return true;
};
+ auto cl_target_prec = [](INode & n)
+ {
+ return n.assigned_target() == Target::CL;
+ };
auto qs8_prec = [&g](INode & n)
{
ARM_COMPUTE_ERROR_ON(n.output(0) == nullptr);
@@ -315,10 +322,11 @@ void NodeFusionMutator::mutate(Graph &g)
};
// Fusion mutations
- detail::fuse_layer<BatchNormalizationLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<BatchNormalizationLayerNode>, supported_fused_activations);
- detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_fused_activations);
- detail::fuse_layer<DepthwiseConvolutionLayerNode, ActivationLayerNode>(g, qs8_prec, detail::fuse_node_with_activation<DepthwiseConvolutionLayerNode>, supported_fused_activations);
- detail::fuse_layer<FullyConnectedLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<FullyConnectedLayerNode>, supported_fused_activations);
+ detail::fuse_layer<BatchNormalizationLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<BatchNormalizationLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<DepthwiseConvolutionLayerNode, ActivationLayerNode>(g, qs8_prec, detail::fuse_node_with_activation<DepthwiseConvolutionLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<FullyConnectedLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<FullyConnectedLayerNode>, supported_fused_activations_conv);
+ detail::fuse_layer<EltwiseLayerNode, ActivationLayerNode>(g, cl_target_prec, detail::fuse_node_with_activation<EltwiseLayerNode>, supported_fused_activations_eltwise);
detail::fuse_layer<ConvolutionLayerNode, BatchNormalizationLayerNode>(g, empty_prec, detail::fuse_convolution_with_batch_normalization);
detail::fuse_layer<DepthwiseConvolutionLayerNode, BatchNormalizationLayerNode>(g, empty_prec, detail::fuse_depthwise_convolution_with_batch_normalization);
}
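The mutator gates the new eltwise fusion twice: a backend precondition (only nodes assigned to the CL target, since NEON and GLES do not fuse yet) and a wider activation set that also admits TANH and LOGISTIC. A standalone sketch of that gating, with simplified node types in place of the graph API:

#include <functional>
#include <iostream>
#include <set>

enum class Target { NEON, CL, GC };
enum class Activation { RELU, BOUNDED_RELU, LU_BOUNDED_RELU, TANH, LOGISTIC, ABS };

struct Node
{
    Target     target;
    Activation act;
};

// Both the backend precondition and the per-operator activation whitelist must
// hold before the activation node is absorbed into the eltwise node.
bool can_fuse_eltwise_activation(const Node &n,
                                 const std::function<bool(const Node &)> &prec,
                                 const std::set<Activation> &supported)
{
    return prec(n) && supported.count(n.act) != 0;
}

int main()
{
    const std::set<Activation> supported_eltwise = { Activation::RELU, Activation::BOUNDED_RELU,
                                                     Activation::LU_BOUNDED_RELU, Activation::TANH,
                                                     Activation::LOGISTIC };
    const auto cl_target_prec = [](const Node &n) { return n.target == Target::CL; };

    std::cout << can_fuse_eltwise_activation({ Target::CL, Activation::TANH }, cl_target_prec, supported_eltwise) << '\n';   // 1
    std::cout << can_fuse_eltwise_activation({ Target::NEON, Activation::RELU }, cl_target_prec, supported_eltwise) << '\n'; // 0
}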
diff --git a/src/graph/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
index a83a5fb3b2..92d183e693 100644
--- a/src/graph/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -52,6 +52,16 @@ RoundingPolicy EltwiseLayerNode::rounding_policy() const
return descriptor.r_policy;
}
+ActivationLayerInfo EltwiseLayerNode::fused_activation() const
+{
+ return descriptor.fused_activation;
+}
+
+void EltwiseLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
+{
+ descriptor.fused_activation = fused_activation;
+}
+
bool EltwiseLayerNode::forward_descriptors()
{
if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
diff --git a/src/runtime/CL/functions/CLElementwiseOperations.cpp b/src/runtime/CL/functions/CLElementwiseOperations.cpp
index 69cebc7180..7636a87e93 100644
--- a/src/runtime/CL/functions/CLElementwiseOperations.cpp
+++ b/src/runtime/CL/functions/CLElementwiseOperations.cpp
@@ -47,96 +47,96 @@ void configure_border_handler(CLFillBorderKernel &border_handler, BorderSize bor
}
} // namespace
-void CLArithmeticAddition::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy)
+void CLArithmeticAddition::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLSaturatedArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::ADD, input1, input2, output, policy);
+ k->configure(ArithmeticOperation::ADD, input1, input2, output, policy, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
- return CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, input1, input2, output, policy);
+ return CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, input1, input2, output, policy, act_info);
}
-void CLArithmeticSubtraction::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy)
+void CLArithmeticSubtraction::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLSaturatedArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::SUB, input1, input2, output, policy);
+ k->configure(ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(policy);
- return CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::SUB, input1, input2, output, policy);
+ return CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
}
-void CLArithmeticDivision::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+void CLArithmeticDivision::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::DIV, input1, input2, output);
+ k->configure(ArithmeticOperation::DIV, input1, input2, output, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLArithmeticDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLArithmeticDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return CLArithmeticOperationKernel::validate(ArithmeticOperation::DIV, input1, input2, output);
+ return CLArithmeticOperationKernel::validate(ArithmeticOperation::DIV, input1, input2, output, act_info);
}
-void CLElementwiseMax::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+void CLElementwiseMax::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::MAX, input1, input2, output);
+ k->configure(ArithmeticOperation::MAX, input1, input2, output, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return CLArithmeticOperationKernel::validate(ArithmeticOperation::MAX, input1, input2, output);
+ return CLArithmeticOperationKernel::validate(ArithmeticOperation::MAX, input1, input2, output, act_info);
}
-void CLElementwiseMin::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+void CLElementwiseMin::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::MIN, input1, input2, output);
+ k->configure(ArithmeticOperation::MIN, input1, input2, output, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return CLArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output);
+ return CLArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output, act_info);
}
-void CLElementwiseSquaredDiff::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+void CLElementwiseSquaredDiff::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
+ k->configure(ArithmeticOperation::SQUARED_DIFF, input1, input2, output, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return CLArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
+ return CLArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output, act_info);
}
-void CLElementwisePower::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+void CLElementwisePower::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
- k->configure(ArithmeticOperation::POWER, input1, input2, output);
+ k->configure(ArithmeticOperation::POWER, input1, input2, output, act_info);
_kernel = std::move(k);
configure_border_handler(_border_handler, _kernel->border_size(), input1, input2, output);
}
-Status CLElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return CLArithmeticOperationKernel::validate(ArithmeticOperation::POWER, input1, input2, output);
+ return CLArithmeticOperationKernel::validate(ArithmeticOperation::POWER, input1, input2, output, act_info);
}
} // namespace arm_compute
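From the caller's perspective the fusion is just an extra, defaulted argument on the existing CL functions. A hypothetical usage sketch (tensor shapes, names and the surrounding setup are assumptions, not part of this change):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    CLTensor a, b, out;
    const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);
    a.allocator()->init(info);
    b.allocator()->init(info);
    out.allocator()->init(info);

    // New overload: the ReLU runs inside the addition kernel instead of a
    // separate CLActivationLayer pass over the output.
    CLArithmeticAddition add;
    add.configure(&a, &b, &out, ConvertPolicy::SATURATE,
                  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    a.allocator()->allocate();
    b.allocator()->allocate();
    out.allocator()->allocate();

    add.run();
    CLScheduler::get().sync();
}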
diff --git a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
index c1c971816c..b527922d2b 100644
--- a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
+++ b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
@@ -32,10 +32,10 @@
namespace arm_compute
{
void CLPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLPixelWiseMultiplicationKernel>();
- k->configure(input1, input2, output, scale, overflow_policy, rounding_policy);
+ k->configure(input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
_kernel = std::move(k);
if(output->info()->dimension(0) > 1)
@@ -50,15 +50,15 @@ void CLPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *input2,
}
Status CLPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
- return CLPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
+ return CLPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
}
-void CLComplexPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
+void CLComplexPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLComplexPixelWiseMultiplicationKernel>();
- k->configure(input1, input2, output);
+ k->configure(input1, input2, output, act_info);
_kernel = std::move(k);
if(output->info()->dimension(0) > 1)
@@ -72,8 +72,8 @@ void CLComplexPixelWiseMultiplication::configure(ICLTensor *input1, ICLTensor *i
}
}
-Status CLComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status CLComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return CLComplexPixelWiseMultiplicationKernel::validate(input1, input2, output);
+ return CLComplexPixelWiseMultiplicationKernel::validate(input1, input2, output, act_info);
}
} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp b/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp
index cd9c8ddad2..b0d8a3cf9f 100755
--- a/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp
@@ -30,14 +30,16 @@
using namespace arm_compute;
-void GCArithmeticAddition::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output, ConvertPolicy policy)
+void GCArithmeticAddition::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<GCArithmeticAdditionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
-Status GCArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status GCArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return GCArithmeticAdditionKernel::validate(input1, input2, output, policy);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp b/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp
index 126476d2a9..1075f0b5be 100755
--- a/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp
@@ -30,8 +30,9 @@
using namespace arm_compute;
-void GCPixelWiseMultiplication::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output, float scale)
+void GCPixelWiseMultiplication::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output, float scale, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<GCPixelWiseMultiplicationKernel>();
k->configure(input1, input2, output, scale);
_kernel = std::move(k);
diff --git a/src/runtime/NEON/functions/NEArithmeticAddition.cpp b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
index 6d0b207cf1..06c71db1bd 100644
--- a/src/runtime/NEON/functions/NEArithmeticAddition.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
@@ -31,14 +31,16 @@
namespace arm_compute
{
-void NEArithmeticAddition::configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy)
+void NEArithmeticAddition::configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticAdditionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
-Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticAdditionKernel::validate(input1, input2, output, policy);
}
} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
index 0ad87383ce..454adc336b 100644
--- a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
@@ -31,8 +31,9 @@
namespace arm_compute
{
-void NEArithmeticSubtraction::configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy)
+void NEArithmeticSubtraction::configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticSubtractionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
@@ -48,8 +49,9 @@ void NEArithmeticSubtraction::configure(ITensor *input1, ITensor *input2, ITenso
}
}
-Status NEArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status NEArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticSubtractionKernel::validate(input1, input2, output, policy);
}
} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
index 0ba0ddbe3a..7451c6ff2b 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -32,15 +32,17 @@
namespace arm_compute
{
-void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *output)
+void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::MAX, input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
if(input1->data_type() == DataType::QASYMM8_SIGNED)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
@@ -49,15 +51,17 @@ Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *
return NEArithmeticOperationKernel::validate(ArithmeticOperation::MAX, input1, input2, output);
}
-void NEElementwiseMin::configure(ITensor *input1, ITensor *input2, ITensor *output)
+void NEElementwiseMin::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::MIN, input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
if(input1->data_type() == DataType::QASYMM8_SIGNED)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
@@ -66,39 +70,45 @@ Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *
return NEArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output);
}
-void NEElementwiseSquaredDiff::configure(ITensor *input1, ITensor *input2, ITensor *output)
+void NEElementwiseSquaredDiff::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
}
-void NEElementwiseDivision::configure(ITensor *input1, ITensor *input2, ITensor *output)
+void NEElementwiseDivision::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEDivisionOperationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEDivisionOperationKernel::validate(input1, input2, output);
}
-void NEElementwisePower::configure(ITensor *input1, ITensor *input2, ITensor *output)
+void NEElementwisePower::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEPowerOperationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status NEElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEPowerOperationKernel::validate(input1, input2, output);
}
diff --git a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
index e2516e420c..eaf233b9ed 100644
--- a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
+++ b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
@@ -31,8 +31,10 @@
namespace arm_compute
{
-void NEPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+void NEPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
+ const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEPixelWiseMultiplicationKernel>();
k->configure(input1, input2, output, scale, overflow_policy, rounding_policy);
_kernel = std::move(k);
@@ -47,13 +49,16 @@ void NEPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITen
}
}
}
-Status NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+Status NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
+ const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
}
-void NEComplexPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITensor *output)
+void NEComplexPixelWiseMultiplication::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEComplexPixelWiseMultiplicationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
@@ -69,8 +74,9 @@ void NEComplexPixelWiseMultiplication::configure(ITensor *input1, ITensor *input
}
}
-Status NEComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+Status NEComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEComplexPixelWiseMultiplicationKernel::validate(input1, input2, output);
}