From f9b595adbdc3f6f51ffa2c1f2aa70d0262d0db2d Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Fri, 3 Jul 2020 13:34:52 +0100
Subject: COMPMID-3532: Align data type support between doxygen and implementation - NEON

Change-Id: I70662cfb43890873b706b3f22b348f5d8cdd63ca
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3506
Tested-by: Arm Jenkins
Reviewed-by: Manuel Bottini
Reviewed-by: Sheri Zhang
Comments-Addressed: Arm Jenkins
---
 .../NEON/kernels/NEAbsoluteDifferenceKernel.cpp    | 11 ++-----
 src/core/NEON/kernels/NEActivationLayerKernel.cpp  | 12 +++-----
 .../NEON/kernels/NEBatchConcatenateLayerKernel.cpp |  4 ---
 .../NEON/kernels/NEBatchToSpaceLayerKernel.cpp     |  3 --
 .../NEDepthwiseConvolutionLayerNativeKernel.cpp    |  4 +--
 src/core/NEON/kernels/NEDilateKernel.cpp           | 14 ++++-----
 src/core/NEON/kernels/NEErodeKernel.cpp            | 14 ++++-----
 src/core/NEON/kernels/NEFlattenLayerKernel.cpp     | 11 ++++---
 .../kernels/NEFuseBatchNormalizationKernel.cpp     |  1 -
 .../NEON/kernels/NEGEMMLowpReductionKernel.cpp     | 11 +------
 src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp | 15 +++-------
 .../NEON/kernels/NEMaxUnpoolingLayerKernel.cpp     | 33 +++++++++-------------
 .../kernels/NEPixelWiseMultiplicationKernel.cpp    | 11 +++----
 src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp    |  2 +-
 src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp  |  7 +----
 src/core/NEON/kernels/NEReverseKernel.cpp          | 34 ++++------------------
 src/core/NEON/kernels/NEWeightsReshapeKernel.cpp   | 15 ++++------
 src/runtime/NEON/functions/NERNNLayer.cpp          |  1 +
 18 files changed, 64 insertions(+), 139 deletions(-)

diff --git a/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp b/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp
index 62285e0578..28f30717ab 100644
--- a/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp
+++ b/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,15 +32,9 @@
 #include "arm_compute/core/Validate.h"
 #include
-#include
-
-using namespace arm_compute;
 namespace arm_compute
 {
-class Coordinates;
-} // namespace arm_compute
-
 namespace
 {
 void abs_diff_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
@@ -140,7 +134,7 @@ void NEAbsoluteDifferenceKernel::configure(const ITensor *input1, const ITensor
     {
         set_format_if_unknown(*output->info(), Format::S16);
     }
-    else if(input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32)
+    else if(input1->info()->data_type() == DataType::U8 || input2->info()->data_type() == DataType::U8)
     {
         set_format_if_unknown(*output->info(), Format::U8);
     }
@@ -210,3 +204,4 @@ void NEAbsoluteDifferenceKernel::run(const Window &window, const ThreadInfo &inf
     _func(_input1, _input2, _output, window);
 }
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 43426dc122..1db9ff9e3f 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -27,28 +27,23 @@
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/NEON/NEAsymm.h"
-#include "arm_compute/core/NEON/NEFixedPoint.h"
-#include "arm_compute/core/NEON/NEMath.h"
 #include "arm_compute/core/NEON/NESymm.h"
 #include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 #include
-#include
-#include
-#include
 #include
-using namespace arm_compute;
+namespace arm_compute
+{
 namespace
 {
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &activation_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);
     const static std::set qasymm8_supported_activations =
     {
@@ -874,3 +869,4 @@ void NEActivationLayerKernel::run_op(const InputTensorMap &inputs,
     (this->*_func)(inputs.at(TensorType::ACL_SRC), outputs.at(TensorType::ACL_DST), window);
 }
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEBatchConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEBatchConcatenateLayerKernel.cpp
index 5650b810d2..a5a574de63 100644
--- a/src/core/NEON/kernels/NEBatchConcatenateLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchConcatenateLayerKernel.cpp
@@ -26,17 +26,13 @@
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/NEON/NEAsymm.h"
-#include "arm_compute/core/NEON/NEFixedPoint.h"
 #include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include
-
 namespace arm_compute
 {
 namespace
diff --git a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
index c4c0f01f0f..4ad3dd76f9 100644
--- a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
@@ -25,12 +25,9 @@
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include
-#include
 using namespace arm_compute::misc::shape_calculator;
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.cpp
index ef196ab904..a639a926ec 100644
--- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.cpp
@@ -427,12 +427,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
     if(is_data_type_quantized_per_channel(weights->data_type()))
     {
         ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(0) != weights->quantization_info().scale().size());
     }
     else
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
     }
     if(biases != nullptr)
@@ -454,6 +453,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
     {
         const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     }
     return Status{};
diff --git a/src/core/NEON/kernels/NEDilateKernel.cpp b/src/core/NEON/kernels/NEDilateKernel.cpp
index e761815f9e..b4cc699c8f 100644
--- a/src/core/NEON/kernels/NEDilateKernel.cpp
+++ b/src/core/NEON/kernels/NEDilateKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,16 +30,9 @@
 #include "arm_compute/core/Validate.h"
 #include
-#include
-#include
-
-using namespace arm_compute;
 namespace arm_compute
 {
-class Coordinates;
-} // namespace arm_compute
-
 BorderSize NEDilateKernel::border_size() const
 {
     return BorderSize(1);
@@ -47,6 +40,10 @@ BorderSize NEDilateKernel::border_size() const
 void NEDilateKernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+
     _input  = input;
     _output = output;
@@ -126,3 +123,4 @@ void NEDilateKernel::run(const Window &window, const ThreadInfo &info)
     },
     in, out);
 }
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEErodeKernel.cpp b/src/core/NEON/kernels/NEErodeKernel.cpp
index 2a538ecd0f..edfcbb50c4 100644
--- a/src/core/NEON/kernels/NEErodeKernel.cpp
+++ b/src/core/NEON/kernels/NEErodeKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,16 +30,9 @@
 #include "arm_compute/core/Validate.h"
 #include
-#include
-#include
-
-using namespace arm_compute;
 namespace arm_compute
 {
-class Coordinates;
-} // namespace arm_compute
-
 BorderSize NEErodeKernel::border_size() const
 {
     return BorderSize(1);
@@ -47,6 +40,10 @@ BorderSize NEErodeKernel::border_size() const
 void NEErodeKernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+
     _input  = input;
     _output = output;
@@ -126,3 +123,4 @@ void NEErodeKernel::run(const Window &window, const ThreadInfo &info)
     },
     in, out);
 }
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEFlattenLayerKernel.cpp b/src/core/NEON/kernels/NEFlattenLayerKernel.cpp
index a48601f7b0..9dbf245c7a 100644
--- a/src/core/NEON/kernels/NEFlattenLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEFlattenLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,19 +33,17 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include
-
-using namespace arm_compute;
+namespace arm_compute
+{
 using namespace misc::shape_calculator;
 namespace
 {
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
     // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
     // Checks performed when output is configured
     if(output->total_size() != 0)
@@ -135,3 +133,4 @@ void NEFlattenLayerKernel::run(const Window &window, const ThreadInfo &info)
     }
     while(in_window.slide_window_slice_3D(in_slice) && out_window.slide_window_slice_1D(out_slice));
 }
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp b/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp
index 6e7e5ab23f..b71630dba3 100644
--- a/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp
+++ b/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp
@@ -32,7 +32,6 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include "utils/TypePrinter.h"
 #include
 namespace arm_compute
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
index 1acdb1efce..a8a976cd6b 100644
--- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
@@ -24,19 +24,10 @@
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
 #include "arm_compute/core/AccessWindowStatic.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-
-#include
-#include
 namespace arm_compute
 {
@@ -45,7 +36,7 @@ namespace
 Status validate_arguments_matrix_a_reduction(const ITensorInfo *input, const ITensorInfo *output)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
     if(output->total_size() > 0)
     {
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
index 88104f7297..9080051e93 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,23 +24,16 @@
 #include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "arm_compute/core/AccessWindowStatic.h"
-#include "arm_compute/core/Coordinates.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/NEON/INEKernel.h"
 #include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 #include
-#include
-#include
-
-using namespace arm_compute;
+namespace arm_compute
+{
 namespace
 {
 TensorShape get_output_shape(const ITensorInfo *input)
@@ -57,7 +50,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
     //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     if(output->total_size() != 0)
     {
@@ -192,3 +184,4 @@ void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
         }
     }
 }
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.cpp b/src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.cpp
index 1967c553bd..1f65e3260b 100644
--- a/src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.cpp
@@ -23,16 +23,8 @@
  */
 #include "arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h"
-#include "arm_compute/core/AccessWindowStatic.h"
 #include "arm_compute/core/CPP/Validate.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/NEAsymm.h"
-#include "arm_compute/core/NEON/NEFixedPoint.h"
-#include "arm_compute/core/NEON/NEMath.h"
 #include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
@@ -48,6 +40,10 @@ namespace
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, indices);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32);
+
     int                 pool_stride_x = 0;
     int                 pool_stride_y = 0;
     PoolingType         pool_type     = pool_info.pool_type;
@@ -56,10 +52,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     const int    pool_size_x = pool_info.pool_size.width;
     const int    pool_size_y = pool_info.pool_size.height;
     const Size2D pool_size(pool_size_x, pool_size_y);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32);
+
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method");
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
     if(output->total_size() != 0)
     {
@@ -72,20 +66,19 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
 } // namespace
 NEMaxUnpoolingLayerKernel::NEMaxUnpoolingLayerKernel()
-    : _func(nullptr), _input(nullptr), _output(nullptr), _indices(nullptr), _pool_info(), _data_layout(DataLayout::UNKNOWN), _num_elems_processed_per_iteration(0)
+    : _func(nullptr), _input(nullptr), _output(nullptr), _indices(nullptr)
 {
 }
 void NEMaxUnpoolingLayerKernel::configure(const ITensor *input, const ITensor *indices, ITensor *output, const PoolingLayerInfo &pool_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    const Size2D pool_size(pool_info.pool_size.width, pool_info.pool_size.height);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info, indices->info()));
-    _input       = input;
-    _output      = output;
-    _indices     = indices;
-    _pool_info   = pool_info;
-    _data_layout = input->info()->data_layout();
+
+    _input   = input;
+    _output  = output;
+    _indices = indices;
+
     switch(input->info()->data_type())
     {
         case DataType::F32:
@@ -107,8 +100,8 @@ void NEMaxUnpoolingLayerKernel::configure(const ITensor *input, const ITensor *i
     }
     const TensorShape output_shape = compute_unpool_shape(*input->info(), pool_info);
     auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
-    _num_elems_processed_per_iteration = 1;
-    auto window = calculate_max_window(*input->info(), Steps(_num_elems_processed_per_iteration));
+
+    auto window = calculate_max_window(*input->info(), Steps());
     INEKernel::configure(window);
 }
 template
diff --git a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
index cd1c4b28cc..4b2352f4c2 100644
--- a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
+++ b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
@@ -62,17 +62,18 @@ inline Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *i
     if(output->total_size() > 0)
     {
-        if(is_data_type_quantized(output->data_type()))
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2, output);
-        }
-
         const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
                                         "Output can only be U8 if both inputs are U8");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8 && (input1->data_type() != DataType::QASYMM8 || input2->data_type() != DataType::QASYMM8),
+                                        "Output can only be QASYMM8 if both inputs are QASYMM8");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8_SIGNED && (input1->data_type() != DataType::QASYMM8_SIGNED || input2->data_type() != DataType::QASYMM8_SIGNED),
+                                        "Output can only be QASYMM8_SIGNED if both inputs are QASYMM8_SIGNED");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QSYMM16 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
+                                        "Output can only be QSYMM16 if both inputs are QSYMM16");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::S32 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
                                         "Output can only be S32 if both inputs are QSYMM16");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::S32 && scale != 1.f, "Unsupported scale for QSYMM16 inputs and S32 output");
diff --git a/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp b/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp
index d830d0db67..a1180d5e61 100644
--- a/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp
@@ -29,7 +29,6 @@
 #include "arm_compute/core/Validate.h"
 #include
-#include
 namespace arm_compute
 {
@@ -68,6 +67,7 @@ Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
     if(output != nullptr && output->total_size() != 0)
     {
         ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != 2);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
     }
     return Status{};
diff --git a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp
index 00c3f98334..5cf2bd288c 100644
--- a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp
@@ -23,18 +23,13 @@
  */
 #include "arm_compute/core/NEON/kernels/NEROIPoolingLayerKernel.h"
-#include "arm_compute/core/AccessWindowStatic.h"
 #include "arm_compute/core/CPP/Validate.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 #include "support/ToolchainSupport.h"
 #include
-#include
 namespace arm_compute
 {
@@ -53,7 +48,7 @@ void NEROIPoolingLayerKernel::configure(const ITensor *input, const ITensor *roi
     ARM_COMPUTE_ERROR_ON(rois->info()->dimension(0) != 5);
     ARM_COMPUTE_ERROR_ON(rois->info()->num_dimensions() > 2);
     ARM_COMPUTE_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
     ARM_COMPUTE_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
     if(output->info()->total_size() != 0)
diff --git a/src/core/NEON/kernels/NEReverseKernel.cpp b/src/core/NEON/kernels/NEReverseKernel.cpp
index 5a8c446ddd..8c3c59559f 100644
--- a/src/core/NEON/kernels/NEReverseKernel.cpp
+++ b/src/core/NEON/kernels/NEReverseKernel.cpp
@@ -23,24 +23,11 @@
  */
 #include "arm_compute/core/NEON/kernels/NEReverseKernel.h"
-#include "arm_compute/core/AccessWindowStatic.h"
-#include "arm_compute/core/CPP/Validate.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/NEAsymm.h"
-#include "arm_compute/core/NEON/NEFixedPoint.h"
-#include "arm_compute/core/NEON/NEMath.h"
 #include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include
-#include
-#include
-#include
-
 namespace arm_compute
 {
 namespace
@@ -48,7 +35,7 @@ namespace
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, axis);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
+    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(axis, 1, DataType::U32);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis->num_dimensions() > 1, "Axis must be a 1D tensor");
@@ -159,28 +146,19 @@ void NEReverseKernel::run(const Window &window, const ThreadInfo &info)
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-    switch(_input->info()->data_type())
+    switch(_input->info()->element_size())
     {
-        case DataType::F32:
-        case DataType::U32:
-        case DataType::S32:
+        case 4:
             run_reverse<uint32_t>(window, _input, _axis, _output);
             break;
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        case DataType::F16:
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        case DataType::S16:
-        case DataType::U16:
+        case 2:
             run_reverse<uint16_t>(window, _input, _axis, _output);
             break;
-        case DataType::QASYMM8:
-        case DataType::QASYMM8_SIGNED:
-        case DataType::U8:
-        case DataType::S8:
+        case 1:
             run_reverse<uint8_t>(window, _input, _axis, _output);
             break;
         default:
-            ARM_COMPUTE_ERROR("Data type not supported");
+            ARM_COMPUTE_ERROR("Element size not supported");
     }
 }
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp b/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp
index d376d53081..f271f57f19 100644
--- a/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp
+++ b/src/core/NEON/kernels/NEWeightsReshapeKernel.cpp
@@ -23,15 +23,11 @@
  */
 #include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
-#include "arm_compute/core/Dimensions.h"
-#include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
 namespace
 {
 TensorShape get_output_shape(const ITensorInfo *input, bool has_bias)
@@ -48,11 +44,9 @@ TensorShape get_output_shape(const ITensorInfo *input, bool has_bias)
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output)
 {
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
-                                                         DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL,
-                                                         DataType::BFLOAT16, DataType::F16, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
     if(biases != nullptr)
     {
@@ -179,3 +173,4 @@ void NEWeightsReshapeKernel::run(const Window &window, const ThreadInfo &info)
     },
     in);
 }
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index 19b84e7fb8..5385192f16 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -43,6 +43,7 @@ Status NERNNLayer::validate(const ITensorInfo *input, const ITensorInfo *weights
                             const ITensorInfo *output, const ActivationLayerInfo &info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, recurrent_weights, bias, hidden_state, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
     const int idx_width  = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
     const int idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
--
cgit v1.2.1