From 9c7fed85d339df64937e8edac3b591b8571ccce8 Mon Sep 17 00:00:00 2001
From: morgolock
Date: Wed, 5 Aug 2020 12:30:56 +0100
Subject: COMPMID-3656: Disabled reduce_axis in LOG_SOFTMAX and SOFTMAX

Our implementation of the softmax reduction is only compliant for the
default axis (0). Validate now returns an error when a different axis
is requested.

Change-Id: I4c02aa055bb4474593a3114ec9c83884d3c9120f
Signed-off-by: morgolock
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3658
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Reviewed-by: SiCong Li
---
 arm_compute/runtime/CL/functions/CLSoftmaxLayer.h | 56 ++++++++--------
 .../runtime/NEON/functions/NESoftmaxLayer.h       | 49 +++++++------------
 docs/00_introduction.dox                          |  7 +--
 src/runtime/CL/functions/CLSoftmaxLayer.cpp       | 17 +++----
 src/runtime/NEON/functions/NESoftmaxLayer.cpp     | 13 ++---
 tests/validation/CL/LogSoftmaxLayer.cpp           | 12 ++---
 tests/validation/CL/SoftmaxLayer.cpp              | 30 ++++++------
 tests/validation/NEON/LogSoftmaxLayer.cpp         | 18 +++----
 tests/validation/NEON/SoftmaxLayer.cpp            | 30 ++++++------
 9 files changed, 101 insertions(+), 131 deletions(-)

diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
index ec57bacf07..bb01584ff4 100644
--- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
@@ -48,10 +48,6 @@ class ICLTensor;
  *
  * This function runs the following kernels:
  * -# @ref CLLogits1DNormKernel
- * And if the reduce_end_axis is not 0, the function will use one of the the following kernels to reshape the input and
- * perform softmax on the reshaped input:
- * -# @ref CLFlattenLayerKernel
- * -# @ref CLReshapeLayerKernel
  */
 template <bool IS_LOG = false>
 class CLSoftmaxLayerGeneric : public IFunction
@@ -61,39 +57,31 @@ public:
     CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
     /** Set the input and output tensors.
      *
-     * @param[in]  input           Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
-     * @param[out] output          Destination tensor. Data types supported: same as @p input
-     * @param[in]  beta            (Optional) A scaling factor for the exponent. Defaults to 1.f
-     * @param[in]  reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                             It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                             when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                             Must be in range [0, input_num_dimensions).
+     * @param[in]  input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
+     * @param[out] output Destination tensor. Data types supported: same as @p input
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
+     * @param[in]  axis   (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
      */
-    void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f, size_t reduce_end_axis = 0);
+    void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f, size_t axis = 0);
     /** Set the input and output tensors.
      *
      * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
     * @param[out] output          Destination tensor. Data types supported: same as @p input
     * @param[in]  beta            (Optional) A scaling factor for the exponent. Defaults to 1.f
-     * @param[in]  reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                             It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                             when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                             Must be in range [0, input_num_dimensions).
+     * @param[in]  axis            (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
      */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta = 1.0f, size_t reduce_end_axis = 0);
+    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta = 1.0f, size_t axis = 0);
     /** Static function to check if given info will lead to a valid configuration of @ref CLSoftmaxLayer
      *
-     * @param[in] input           Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
-     * @param[in] output          Destination tensor. Data types supported: same as @p input
-     * @param[in] beta            (Optional) A scaling factor for the exponent. Defaults to 1.f
-     * @param[in] reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                            It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                            when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                            Must be in range [0, input_num_dimensions).
+     * @param[in] input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
+     * @param[in] output Destination tensor. Data types supported: same as @p input
+     * @param[in] beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
+     * @param[in] axis   (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
+     *
     * @return a status
     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, size_t reduce_end_axis = 0);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, size_t axis = 0);

    // Inherited methods overridden:
    void run() override;
@@ -106,14 +94,11 @@ private:
      * it initializes the kernel @p _flatten_kernel and the tensors @p _input_flat and
      * @p _output_flat
      *
-     * @param[in] input           Original source tensor.
-     * @param[in] output          Original destination tensor.
-     * @param[in] reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                            It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                            when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                            Must be in range [0, input_num_dimensions).
+     * @param[in] input  Original source tensor.
+     * @param[in] output Original destination tensor.
+     * @param[in] axis   (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
      */
-    void configure_reshape_input_kernel(const ICLTensor *input, const ICLTensor *output, size_t reduce_end_axis);
+    void configure_reshape_input_kernel(const ICLTensor *input, const ICLTensor *output, size_t axis);
     /** Utility method to configure the kernels needed to flatten the input
      * tensor.
      *
@@ -124,12 +109,9 @@ private:
      * @param[in] compile_context The compile context to be used.
      * @param[in] input           Original source tensor.
      * @param[in] output          Original destination tensor.
-     * @param[in] reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                            It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                            when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                            Must be in range [0, input_num_dimensions).
+     * @param[in] axis            (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
      */
-    void configure_reshape_input_kernel(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *output, size_t reduce_end_axis);
+    void configure_reshape_input_kernel(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *output, size_t axis);

     MemoryGroup                    _memory_group;
     CLLogits1DMaxShiftExpSumKernel _max_shift_exp_sum_kernel;
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index d37f6ba4e0..9fb4d85262 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -48,10 +48,6 @@ class ITensor;
  * -# @ref NEFillBorderKernel
  * -# @ref NELogits1DMaxKernel
  * -# @ref NELogits1DSoftmaxKernel
- * And if the reduce_end_axis is not 0 or -input_num_dimensions, the function will use one of the the following kernels
- * to reshape the input and perform softmax on the reshaped input:
- * -# @ref NEFlattenLayerKernel
- * -# @ref NEReshapeLayerKernel
  */
 template <bool IS_LOG = false>
 class NESoftmaxLayerGeneric : public IFunction
@@ -69,31 +65,24 @@ public:
     NESoftmaxLayerGeneric &operator=(NESoftmaxLayerGeneric &&) = default;
     /** Set the input and output tensors.
      *
-     * @param[in,out] input           Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. If the width is not a
-     *                                multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
-     *                                last value of each row to the nearest multiple.
-     * @param[out]    output          Destination tensor. Data types supported: same as @p input.
-     * @param[in]     beta            (Optional) A scaling factor for the exponent.
-     * @param[in]     reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                                It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                                when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                                Negative index is used to specify axis from the end (e.g. -1 for the last axis).
-     *                                Must be in range [-input_num_dimensions, input_num_dimensions).
+     * @param[in,out] input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. If the width is not a
+     *                       multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
+     *                       last value of each row to the nearest multiple.
+     * @param[out]    output Destination tensor. Data types supported: same as @p input.
+     * @param[in]     beta   (Optional) A scaling factor for the exponent.
+     * @param[in]     axis   (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
      */
-    void configure(ITensor *input, ITensor *output, float beta = 1.0f, int32_t reduce_end_axis = 0);
+    void configure(ITensor *input, ITensor *output, float beta = 1.0f, int32_t axis = 0);
     /** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
      *
-     * @param[in] input           Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     * @param[in] output          Destination tensor info. Data types supported: same as @p input
-     * @param[in] beta            (Optional) A scaling factor for the exponent.
-     * @param[in] reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                            It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                            when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                            Negative index is used to specify axis from the end (e.g. -1 for the last axis).
-     *                            Must be in range [-input_num_dimensions, input_num_dimensions).
+     * @param[in] input  Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] output Destination tensor info. Data types supported: same as @p input
+     * @param[in] beta   (Optional) A scaling factor for the exponent.
+     * @param[in] axis   (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
+     *
     * @return a status
     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, int32_t reduce_end_axis = 0);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, int32_t axis = 0);

    // Inherited methods overridden:
    void run() override;
@@ -106,15 +95,11 @@ private:
      * it initializes the kernel @p _flatten_kernel and the tensors @p _input_flat and
      * @p _output_flat
      *
-     * @param[in] input           Original source tensor.
-     * @param[in] output          Original destination tensor.
-     * @param[in] reduce_end_axis (Optional) The last axis of the first n dimensions (inclusive)to reduce. Defaults to 0.
-     *                            It has the purpose of squashing together the first n dimensions till (including) the @p reduce_end_axis. For instance, given a [2x3x4x5] image,
-     *                            when @p reduce_end_axis is 1, the reduction will be applied to axes 0 and 1, and the Softmax op will be applied on each of the [2x3] planes of the input image.
-     *                            Negative index is used to specify axis from the end (e.g. -1 for the last axis).
-     *                            Must be in range [-input_num_dimensions, input_num_dimensions).
+     * @param[in] input  Original source tensor.
+     * @param[in] output Original destination tensor.
+     * @param[in] axis   (Optional) The last axis of the first n dimensions (inclusive) to reduce. Only supports axis 0.
      */
-    void configure_reshape_input_kernel(const ITensor *input, const ITensor *output, int32_t reduce_end_axis);
+    void configure_reshape_input_kernel(const ITensor *input, const ITensor *output, int32_t axis);

     MemoryGroup         _memory_group;
     NELogits1DMaxKernel _max_kernel;
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index ed04bc840d..906ddf27bf 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -260,9 +260,10 @@ v20.08 Public major release
   - Deprecated functions / interfaces:
     - Non-descriptor based interfaces for @ref NEThreshold, @ref CLThreshold
     - In @ref NESoftmaxLayer, @ref NELogSoftmaxLayer, @ref CLSoftmaxLayer, @ref CLLogSoftmaxLayer and @ref GCSoftmaxLayer :
-      "axis" has been renamed to "reduce_end_axis", which is the last axis (inclusive) before which all dimensions are reduced/collapsed.
-      The default "axis" (now "reduce_end_axis") value for @ref NESoftmaxLayer and @ref NELogSoftmaxLayer is changed from -1 to 0.
-      The default "axis" (now "reduce_end_axis") value for @ref CLSoftmaxLayer, @ref CLLogSoftmaxLayer and @ref GCSoftmaxLayer is changed from 1 to 0.
+      The default "axis" value for @ref CLSoftmaxLayer, @ref CLLogSoftmaxLayer and @ref GCSoftmaxLayer is changed from 1 to 0.
+      Only axis 0 is supported.
+      The default "axis" value for @ref NESoftmaxLayer and @ref NELogSoftmaxLayer is changed from -1 to 0.
+      Only axis 0 is supported.
   - The support for quantized data types has been removed from @ref CLLogSoftmaxLayer due to implementation complexity.

 v20.05 Public major release
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index cd4fdaedd8..f7b2935622 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -81,20 +81,20 @@
 }

 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor *output, float beta, size_t reduce_end_axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor *output, float beta, size_t axis)
 {
-    configure(CLKernelLibrary::get().get_compile_context(), input, output, beta, reduce_end_axis);
+    configure(CLKernelLibrary::get().get_compile_context(), input, output, beta, axis);
 }

 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, size_t reduce_end_axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, size_t axis)
 {
     // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, reduce_end_axis));
+    ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, axis));

     // Convert reduce-before axis (inclusive) to first n axes to reduce
-    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, input->info()->num_dimensions());
+    size_t first_n_reduce_axes = dim_index_2_num_dims(axis, input->info()->num_dimensions());

     // We only need flattening when the number of axes to reduce is greater than 1
     _needs_flattening = first_n_reduce_axes > 1;
@@ -171,15 +171,16 @@
 }

 template <bool IS_LOG>
-Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, size_t reduce_end_axis)
+Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, size_t axis)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis != 0, "Only axis 0 supported");
     ARM_COMPUTE_UNUSED(beta);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() <= reduce_end_axis);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() <= axis);

     // Convert reduce-before axis (inclusive) to first n axes to reduce
-    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, input->num_dimensions());
+    size_t first_n_reduce_axes = dim_index_2_num_dims(axis, input->num_dimensions());

     // Create intermediate tensor info
     DataType tmp_data_type = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 8099029735..750992fca6 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -68,14 +68,14 @@
 }

 template <bool IS_LOG>
-void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, float beta, int32_t reduce_end_axis)
+void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, float beta, int32_t axis)
 {
     // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(NESoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, reduce_end_axis));
+    ARM_COMPUTE_ERROR_THROW_ON(NESoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, axis));

     // Convert reduce-before axis (inclusive) to first n axes to reduce
-    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(input->info()->num_dimensions()));
+    size_t first_n_reduce_axes = dim_index_2_num_dims(axis, static_cast<int32_t>(input->info()->num_dimensions()));

     // We only need flattening when the number of axes to reduce is greater than 1
     _needs_flattening = first_n_reduce_axes > 1;
@@ -142,16 +142,17 @@ void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, f
 }

 template <bool IS_LOG>
-Status NESoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t reduce_end_axis)
+Status NESoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
 {
     // Perform validation step
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
     ARM_COMPUTE_UNUSED(beta);
-    ARM_COMPUTE_RETURN_ERROR_ON(reduce_end_axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= reduce_end_axis);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis != 0, "Only axis 0 supported");
+    ARM_COMPUTE_RETURN_ERROR_ON(axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= axis);

     // Convert reduce-before axis (inclusive) to first n axes to reduce
-    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(input->num_dimensions()));
+    size_t first_n_reduce_axes = dim_index_2_num_dims(axis, static_cast<int32_t>(input->num_dimensions()));

     // Create intermediate tensor info
     DataType tmp_data_type = input->data_type();
diff --git a/tests/validation/CL/LogSoftmaxLayer.cpp b/tests/validation/CL/LogSoftmaxLayer.cpp
index 8e9c6fb446..15466affc4 100644
--- a/tests/validation/CL/LogSoftmaxLayer.cpp
+++ b/tests/validation/CL/LogSoftmaxLayer.cpp
@@ -59,7 +59,7 @@ TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -67,7 +67,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture<half>, framework::Data
 FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -75,7 +75,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture<half>, framework::Data
 FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -86,7 +86,7 @@ TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -94,7 +94,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture<float>, framework::Dat
 FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -102,7 +102,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture<float>, framework::Dat
 FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index 1894b4a64c..ce05edc6d4 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -153,22 +153,22 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
                                                1.0,
                                                2.0,
                                            })),
-                                           framework::dataset::make("reduce_end_axis", {
framework::dataset::make("axis", { + 0, + 0, 0, 0, 0, 0, - 1, 0, - 1, 0, 2, -1, })), framework::dataset::make("Expected", { false, false, false, false, false, true, true, true, false, false })), - input_info, output_info, beta, reduce_end_axis, expected) + input_info, output_info, beta, axis, expected) { - ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, reduce_end_axis)) == expected, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, axis)) == expected, framework::LogLevel::ERRORS); } // clang-format on // *INDENT-ON* @@ -181,7 +181,7 @@ TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("ReduceEndAxis", { 0, 1 }))) + framework::dataset::make("Axis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -189,7 +189,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::Dataset FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("ReduceEndAxis", { 0, 1 }))) + framework::dataset::make("Axis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -197,7 +197,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::Dataset FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) + framework::dataset::make("Axis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -208,7 +208,7 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("ReduceEndAxis", { 0, 1 }))) + framework::dataset::make("Axis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -216,7 +216,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::Datase FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("ReduceEndAxis", { 0, 1 }))) + framework::dataset::make("Axis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -224,7 +224,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::Datase FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(), framework::dataset::make("DataType", 
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -241,7 +241,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerQuantizedFixture<uint8_t>, framew
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { 0, 1 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -250,7 +250,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerQuantizedFixture<uint8_t>, framew
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.0f }))),
-               framework::dataset::make("ReduceEndAxis", { 0 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -259,7 +259,7 @@ FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerQuantizedFixture<uint8_t>, framework
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.0f }))),
-               framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -273,7 +273,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerQuantizedFixture<int8_t>, framewo
                        framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { 0, 1 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8_signed, tolerance_number_qasymm8_signed);
diff --git a/tests/validation/NEON/LogSoftmaxLayer.cpp b/tests/validation/NEON/LogSoftmaxLayer.cpp
index 67fc4be648..3f85e3f7a2 100644
--- a/tests/validation/NEON/LogSoftmaxLayer.cpp
+++ b/tests/validation/NEON/LogSoftmaxLayer.cpp
@@ -71,7 +71,7 @@ TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NELogSoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
@@ -79,7 +79,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NELogSoftmaxLayerFixture<half>, framework::Data
 FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
@@ -87,7 +87,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture<half>, framework::Da
 FIXTURE_DATA_TEST_CASE(RunLarge, NELogSoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
@@ -99,7 +99,7 @@ TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
@@ -107,7 +107,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerFixture<float>, framework::D
 FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
@@ -115,7 +115,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture<float>, framework::D
 FIXTURE_DATA_TEST_CASE(RunLarge, NELogSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
@@ -132,7 +132,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerQuantizedFixture<uint8_t>, f
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { 0 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -141,7 +141,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerQuantizedFixture<uint8_t>, f
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -150,7 +150,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NELogSoftmaxLayerQuantizedFixture<uint8_t>, fra
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.0f }))),
-               framework::dataset::make("ReduceEndAxis", { 0 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index cc252146cf..70203d9ce9 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -98,18 +98,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
                                                2.0,
                                                1.0,
                                            })),
-                                           framework::dataset::make("reduce_end_axis", { 0,
+                                           framework::dataset::make("axis", { 0,
+                                               0,
                                                0,
                                                0,
-                                               -1,
                                                0,
                                                2,
                                                -3,
                                            })),
                                            framework::dataset::make("Expected", { false, false, false, true, true, false, false })),
-               input_info, output_info, beta, reduce_end_axis, expected)
+               input_info, output_info, beta, axis, expected)
 {
-    ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, reduce_end_axis)) == expected, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, axis)) == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*
@@ -123,7 +123,7 @@ TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
@@ -131,7 +131,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half>, framework::Dataset
 FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
@@ -139,7 +139,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<half>, framework::Datas
 FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
                        framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
@@ -151,7 +151,7 @@ TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
@@ -159,7 +159,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<float>, framework::Data
 FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0, 1, 2 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
@@ -167,7 +167,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<float>, framework::Data
 FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
                        framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("Beta", { 1.0f, 2.0f })),
-                       framework::dataset::make("ReduceEndAxis", { 0 })))
+                       framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
@@ -184,7 +184,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture<uint8_t>, fram
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { 0 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -193,7 +193,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture<uint8_t>, fram
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { -1, 1, 2 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -202,7 +202,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerQuantizedFixture<uint8_t>, framew
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.0f }))),
-               framework::dataset::make("ReduceEndAxis", { 0 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -214,7 +214,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture<int8_t>, frame
                        framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { -1, 0 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
@@ -223,7 +223,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture<int8_t>, frame
                        framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
                        framework::dataset::make("Beta", { 1.0f, 2.f }))),
-               framework::dataset::make("ReduceEndAxis", { -2, 1, 2 })))
+               framework::dataset::make("Axis", { 0 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
--
cgit v1.2.1
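
Usage note (not part of the patch): the sketch below illustrates the contract this change enforces at validation time. It assumes a build of Compute Library that contains this revision; the tensor shapes and the main() harness are illustrative only, and CLSoftmaxLayer::validate() applies the same axis restriction.

    // Minimal sketch: after this patch, NESoftmaxLayer::validate() accepts only axis 0.
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"

    #include <iostream>

    using namespace arm_compute;

    int main()
    {
        // Illustrative 2D input: 128 classes per row, 32 rows, F32.
        const TensorInfo src(TensorShape(128U, 32U), 1, DataType::F32);
        const TensorInfo dst(TensorShape(128U, 32U), 1, DataType::F32);

        // The default axis (0) still validates successfully.
        const Status ok = NESoftmaxLayer::validate(&src, &dst, 1.0f /* beta */, 0 /* axis */);
        std::cout << "axis 0: " << (bool(ok) ? "valid" : ok.error_description()) << std::endl;

        // Any other axis is now rejected with an error status ("Only axis 0 supported").
        const Status bad = NESoftmaxLayer::validate(&src, &dst, 1.0f /* beta */, 1 /* axis */);
        std::cout << "axis 1: " << (bool(bad) ? "valid" : bad.error_description()) << std::endl;

        return 0;
    }

Calling the static validate() first is the cheap way for callers to detect the unsupported axis and fall back to another implementation, instead of hitting the ARM_COMPUTE_ERROR_THROW_ON check that configure() runs on the same status.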