author     Georgios Pinitas <georgios.pinitas@arm.com>    2017-11-03 19:01:44 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit     f9d3a0a12ede4db89348fd924274c9acc6809bb2 (patch)
tree       73080e4a230c4db248d6f9e16181eab09c8168dc /src/core/CL/kernels/CLActivationLayerKernel.cpp
parent     d6afedc775220f17317f1835a4d18b72a54525de (diff)
download   ComputeLibrary-f9d3a0a12ede4db89348fd924274c9acc6809bb2.tar.gz
COMPMID-617: Add validation functions.
Added validation routines to the following kernels:
- CLActivationLayer
- CLBatchNormalizationLayer
- CLArithmeticAddition
- CLArithmeticSubtraction
- CLPixelwiseMultiplication

Change-Id: I0f3a03154f9e392279f715af656683cd0ad4cef5
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94595
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/core/CL/kernels/CLActivationLayerKernel.cpp')
-rw-r--r-- src/core/CL/kernels/CLActivationLayerKernel.cpp | 28
1 file changed, 21 insertions(+), 7 deletions(-)
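
The patch replaces the hard-aborting checks inside configure() with a static CLActivationLayerKernel::validate() that configure() itself now routes through ARM_COMPUTE_ERROR_THROW_ON, so a caller can test a configuration before building anything. Below is a minimal sketch of that up-front use; the shapes, data types, and the wrapper function name are illustrative assumptions, only the validate() signature and the macro come from the diff itself.

// Hedged sketch: ask validate() whether a configuration is supported before
// constructing/configuring the kernel. Shapes and types here are examples.
#include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

void configure_activation_checked() // hypothetical caller, not part of the patch
{
    const TensorInfo input(TensorShape(16U, 16U), 1, DataType::F32);
    const TensorInfo output(TensorShape(16U, 16U), 1, DataType::F32);
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

    // Same call configure() now makes internally; reports failure via Error
    // instead of asserting halfway through kernel setup.
    Error err = CLActivationLayerKernel::validate(&input, &output, act_info);
    ARM_COMPUTE_ERROR_THROW_ON(err);
}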
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index ca6760d8c2..5bfc832518 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -50,12 +50,11 @@ CLActivationLayerKernel::CLActivationLayerKernel()
 void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32, DataType::QASYMM8);
-    ARM_COMPUTE_ERROR_ON_MSG((input->info()->data_type() == DataType::QASYMM8) && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
-                             "For QASYMM8 only lower/upper bounded relu is supported");
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
     if(output != nullptr)
     {
+        ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), output->info());
         // Output auto inizialitation if not yet initialized
         auto_init_if_empty(*output->info(),
                            input->info()->tensor_shape(),
@@ -63,12 +62,10 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
                            input->info()->data_type(),
                            input->info()->fixed_point_position(),
                            input->info()->quantization_info());
-
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
     }
+    ARM_COMPUTE_ERROR_THROW_ON(CLActivationLayerKernel::validate(input->info(), (output != nullptr) ? output->info() : nullptr, act_info));
+
     const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
     const DataType dt = input->info()->data_type();
     const int fixed_point_position = input->info()->fixed_point_position();
@@ -156,6 +153,23 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
     ICLKernel::configure(win);
 }
+Error CLActivationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->data_type() == DataType::QASYMM8) && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
+                                    "For QASYMM8 only lower/upper bounded relu is supported");
+
+    // Checks performed when output is configured
+    if((output != nullptr) && (output->total_size() != 0))
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+    }
+
+    return Error{};
+}
+
 void CLActivationLayerKernel::run(const Window &window, cl::CommandQueue &queue)
 {
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
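
The QASYMM8 restriction that previously aborted inside configure() is now just another condition validate() can report. The following is a hedged sketch of probing it without touching a kernel object; the QuantizationInfo values are made up for illustration, and the TensorInfo constructor taking a QuantizationInfo is assumed to be the one available alongside QASYMM8 support at this point in the library.

// Hedged sketch: probe the QASYMM8 rule ("only lower/upper bounded relu is
// supported") through validate(), with no CL context or kernel involved.
#include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

void probe_qasymm8_support() // hypothetical helper, not part of the patch
{
    // Quantization scale/offset chosen arbitrarily for the example.
    const TensorInfo qasymm8_input(TensorShape(32U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10));

    // Expected to pass: lower/upper bounded relu; output left as nullptr so
    // only the input-side checks run.
    Error ok = CLActivationLayerKernel::validate(
        &qasymm8_input, nullptr,
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 0.f));
    ARM_COMPUTE_ERROR_THROW_ON(ok);

    // Expected to fail with the message above; the returned Error carries the
    // failure instead of the library asserting at configure() time.
    Error rejected = CLActivationLayerKernel::validate(
        &qasymm8_input, nullptr,
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    (void)rejected;
}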