author     John Richardson <john.richardson@arm.com>  2018-05-08 14:34:33 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>   2018-11-02 16:52:35 +0000
commit     73d4aef12463ac42fa9e31174675f32535a7edd8 (patch)
tree       31707cdf30e99ee6a0e55db4eda34090349bc572
parent     5415a0267523931bae0a012db2438fa7cc89a549 (diff)
download   ComputeLibrary-73d4aef12463ac42fa9e31174675f32535a7edd8.tar.gz
COMPMID-948: Add validation to NEL2NormalizeLayer
Change-Id: I0cfea24884066412c2f13d9acdb72ddbccac7545
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130407
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h   | 26
-rw-r--r--  arm_compute/core/NEON/kernels/NEReductionOperationKernel.h | 19
-rw-r--r--  arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h    | 25
-rw-r--r--  arm_compute/runtime/NEON/functions/NEReductionOperation.h  | 17
-rw-r--r--  src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp         | 80
-rw-r--r--  src/core/NEON/kernels/NEReductionOperationKernel.cpp       | 88
-rw-r--r--  src/runtime/NEON/functions/NEL2NormalizeLayer.cpp          | 22
-rw-r--r--  src/runtime/NEON/functions/NEReductionOperation.cpp        |  9
-rw-r--r--  tests/validation/NEON/L2NormalizeLayer.cpp                 | 33
-rw-r--r--  tests/validation/NEON/ReductionOperation.cpp               | 32
10 files changed, 291 insertions(+), 60 deletions(-)
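
The change adds static validate() entry points that mirror configure() on both the runtime functions and the NEON kernels, so a configuration can be checked without side effects. Below is a minimal usage sketch, not part of the patch: the shapes, the helper name and the early-return error handling are illustrative assumptions; only the validate()/configure() signatures come from the headers changed here.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void l2_normalize_sketch()
    {
        Tensor src{}, dst{};
        src.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));

        // New with this patch: check the configuration up front, before configuring or allocating.
        const Status status = NEL2NormalizeLayer::validate(src.info(), dst.info(), 0 /* axis */);
        if(!bool(status))
        {
            return; // rejected, e.g. unsupported axis, non-F32 data or mismatching shapes
        }

        NEL2NormalizeLayer l2_norm;
        l2_norm.configure(&src, &dst, 0 /* axis */); // default epsilon of 1e-12 applies

        src.allocator()->allocate();
        dst.allocator()->allocate();
        l2_norm.run();
    }
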
diff --git a/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h b/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
index 7fb968e1d8..0de07fdab7 100644
--- a/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_NEL2NORMALIZEKERNEL_H__
-#define __ARM_COMPUTE_NEL2NORMALIZEKERNEL_H__
+#ifndef __ARM_COMPUTE_NEL2NORMALIZELAYERKERNEL_H__
+#define __ARM_COMPUTE_NEL2NORMALIZELAYERKERNEL_H__
#include "arm_compute/core/NEON/INEKernel.h"
@@ -52,14 +52,30 @@ public:
~NEL2NormalizeLayerKernel() = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: F32.
+ * @param[in] input Source tensor. Data types supported: F32. Data layouts supported: NCHW.
* @param[in] sum Sum values tensor. Data types supported: same as @p input.
- * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * Sum will have the same number of dimensions as input.
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
+ * Output will have the same number of dimensions as input.
* @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
* @param[in] epsilon Lower bound value for the normalization.
*/
void configure(const ITensor *input, const ITensor *sum, ITensor *output, unsigned int axis, float epsilon);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEL2NormalizeLayerKernel.
+ *
+ * @param[in] input Source tensor info. Data types supported: F32. Data layouts supported: NCHW.
+ * @param[in] sum Sum values tensor info. Data types supported: same as @p input.
+ * Sum will have the same number of dimensions as input.
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ * Output will have the same number of dimensions as input.
+ * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
+ * @param[in] epsilon Lower bound value for the normalization.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, unsigned int axis, float epsilon);
+
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -71,4 +87,4 @@ private:
float _epsilon;
};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_NEL2NORMALIZEKERNEL_H__ */
+#endif /*__ARM_COMPUTE_NEL2NORMALIZELAYERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
index a5ddd067ef..a20cd46434 100644
--- a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
@@ -50,14 +50,29 @@ public:
NEReductionOperationKernel &operator=(NEReductionOperationKernel &&) = default;
/** Default destructor */
~NEReductionOperationKernel() = default;
+
/** Set the source, destination of the kernel
*
- * @param[in] input Source tensor. Data type supported: F32.
- * @param[out] output Destination tensor.Data types supported: same as @p input.
+ * @param[in] input Source tensor. Data type supported: F32. Data layouts supported: NCHW.
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
+ * Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0
* @param[in] op Reduction operation to perform.
*/
void configure(const ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperationKernel.
+ *
+ * @param[in] input Source tensor info. Data type supported: F32. Data layouts supported: NCHW.
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ * Output will have the same number of dimensions as input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0
+ * @param[in] op Reduction operation to perform.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
+
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
BorderSize border_size() const override;
diff --git a/arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h b/arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h
index 100e239406..3c88bbd9d3 100644
--- a/arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_NEL2NORMALIZE_H__
-#define __ARM_COMPUTE_NEL2NORMALIZE_H__
+#ifndef __ARM_COMPUTE_NEL2NORMALIZELAYER_H__
+#define __ARM_COMPUTE_NEL2NORMALIZELAYER_H__
#include "arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h"
#include "arm_compute/runtime/IFunction.h"
@@ -50,13 +50,24 @@ public:
NEL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in, out] input Source tensor. Data types supported: F32. (Written to only for border_size != 0)
- * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in, out] input Source tensor. Data types supported: F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
- * @param[in] epsilon Lower bound value for the normalization.
+ * @param[in] epsilon (Optional) Lower bound value for the normalization.
*/
void configure(ITensor *input, ITensor *output, unsigned int axis, float epsilon = 1e-12);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEL2NormalizeLayer.
+ *
+ * @param[in] input Source tensor info. Data types supported: F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
+ * @param[in] epsilon (Optional) Lower bound value for the normalization.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, float epsilon = 1e-12);
+
// Inherited methods overridden:
void run() override;
@@ -67,4 +78,4 @@ private:
Tensor _sumsq;
};
}
-#endif /* __ARM_COMPUTE_NEL2NORMALIZE_H__ */
+#endif /* __ARM_COMPUTE_NEL2NORMALIZELAYER_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEReductionOperation.h b/arm_compute/runtime/NEON/functions/NEReductionOperation.h
index 9aa5228b16..02b29fb64e 100644
--- a/arm_compute/runtime/NEON/functions/NEReductionOperation.h
+++ b/arm_compute/runtime/NEON/functions/NEReductionOperation.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,13 +47,24 @@ public:
NEReductionOperation();
/** Set the input and output tensors.
*
- * @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border_size != 0)
- * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in, out] input Source tensor. Data type supported: F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
* @param[in] op Reduction operation to perform.
*/
void configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperation.
+ *
+ * @param[in] input Source tensor info. Data type supported: F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ * @param[in] axis Dimension along which to reduce. Supported reduction axis : 0
+ * @param[in] op Reduction operation to perform.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
+
// Inherited methods overridden:
void run() override;
diff --git a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
index 3bf1d9400e..91776d8100 100644
--- a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -67,6 +67,55 @@ void l2_normalize_X(const ITensor *in, const ITensor *sum, ITensor *out, float e
}
while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(sum_slice));
}
+
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, unsigned int axis, float epsilon)
+{
+ ARM_COMPUTE_UNUSED(epsilon);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, sum, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() != DataLayout::NCHW);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 0, "Unsupported normalization axis, Supported axis is 0");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Normalization axis greater than max number of dimensions");
+
+ // Reduce shape on axis
+ TensorShape sum_shape = input->tensor_shape();
+ sum_shape.set(axis, 1);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(sum->tensor_shape(), sum_shape);
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output->tensor_shape());
+ ARM_COMPUTE_RETURN_ERROR_ON(output->data_layout() != DataLayout::NCHW);
+ }
+
+ return Status{};
+}
+
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *sum, ITensorInfo *output, unsigned int axis)
+{
+ const unsigned int num_elems_processed_per_iteration = 16 / data_size_from_type(input->data_type());
+ const unsigned int num_elems_processed_per_iteration_sum = (axis == 0) ? 1 : num_elems_processed_per_iteration;
+
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type(), input->fixed_point_position());
+
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal sum_access(sum, 0, num_elems_processed_per_iteration_sum);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+
+ bool window_changed = update_window_and_padding(win, input_access, sum_access, output_access);
+ output_access.set_valid_region(win, input->valid_region());
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+
+ return std::make_tuple(err, win);
+}
} // namespace
NEL2NormalizeLayerKernel::NEL2NormalizeLayerKernel()
@@ -77,18 +126,7 @@ NEL2NormalizeLayerKernel::NEL2NormalizeLayerKernel()
void NEL2NormalizeLayerKernel::configure(const ITensor *input, const ITensor *sum, ITensor *output, unsigned int axis, float epsilon)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
- ARM_COMPUTE_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Normalization axis greater than max number of dimensions");
- ARM_COMPUTE_ERROR_ON_MSG(axis > 0, "Unsupported normalization axis, Supported axis is 0");
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
-
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, sum);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-
- unsigned int num_elems_processed_per_iteration = 16 / data_size_from_type(input->info()->data_type());
- unsigned int num_elems_processed_per_iteration_sum = (axis == 0) ? 1 : num_elems_processed_per_iteration;
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), sum->info(), output->info(), axis, epsilon));
_input = input;
_sum = sum;
@@ -97,16 +135,18 @@ void NEL2NormalizeLayerKernel::configure(const ITensor *input, const ITensor *su
_epsilon = epsilon;
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal sum_access(sum->info(), 0, num_elems_processed_per_iteration_sum);
- AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+ auto win_config = validate_and_configure_window(_input->info(), _sum->info(), _output->info(), axis);
+ ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
- update_window_and_padding(win, input_access, sum_access, output_access);
+ INEKernel::configure(std::get<1>(win_config));
+}
- output_access.set_valid_region(win, input->info()->valid_region());
+Status NEL2NormalizeLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, unsigned int axis, float epsilon)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, sum, output, axis, epsilon));
+ ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), sum->clone().get(), output->clone().get(), axis)));
- INEKernel::configure(win);
+ return Status{};
}
void NEL2NormalizeLayerKernel::run(const Window &window, const ThreadInfo &info)
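
The kernel-level validate() introduced above runs the same argument checks as configure() and then repeats the window computation on cloned tensor infos, so auto-initialization of an empty output never touches the caller's objects. A hedged sketch of calling it in isolation follows; the shapes and helper name are illustrative, and the sum tensor is assumed to be already reduced along axis 0 as the header documents.

    #include "arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    Status check_l2_normalize_kernel()
    {
        const TensorInfo input(TensorShape(128U, 64U), 1, DataType::F32);
        const TensorInfo sum(TensorShape(1U, 64U), 1, DataType::F32); // axis 0 collapsed to a single element
        const TensorInfo output(TensorShape(128U, 64U), 1, DataType::F32);

        // Returns an error Status for non-F32 data, a non-NCHW layout, axis != 0,
        // or sum/output shapes that break the rules documented in the kernel header.
        return NEL2NormalizeLayerKernel::validate(&input, &sum, &output, 0 /* axis */, 1e-12f);
    }
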
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
index 1a50ed8bfc..30d42fa25f 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,7 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/NEMath.h"
+#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include <arm_neon.h>
@@ -94,6 +95,61 @@ void reduce_sumsq(const Window &window, const ITensor *input, ITensor *output, u
ARM_COMPUTE_ERROR("Unsupported reduction axis");
}
}
+
+TensorShape calculate_output_shape(const TensorShape &input_shape, unsigned int axis)
+{
+ TensorShape output_shape{ input_shape };
+ output_shape.set(axis, 1);
+
+ return output_shape;
+}
+
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_UNUSED(op);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() != DataLayout::NCHW);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 0, "Unsupported reduction axis, Supported axis is 0");
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->data_layout() != DataLayout::NCHW);
+
+ const TensorShape output_shape = calculate_output_shape(input->tensor_shape(), axis);
+ const TensorInfo tensor_info_reshaped = input->clone()->set_tensor_shape(output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_reshaped);
+ }
+
+ return Status{};
+}
+
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis)
+{
+ // Calculate output shape and set if empty
+ const TensorShape output_shape = calculate_output_shape(input->tensor_shape(), axis);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output, output_shape, 1, input->data_type(), input->fixed_point_position());
+
+ unsigned int num_elems_processed_per_iteration = 16 / data_size_from_type(input->data_type());
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+
+ return std::make_tuple(err, win);
+}
} // namespace
NEReductionOperationKernel::NEReductionOperationKernel()
@@ -109,19 +165,8 @@ BorderSize NEReductionOperationKernel::border_size() const
void NEReductionOperationKernel::configure(const ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
- ARM_COMPUTE_ERROR_ON_MSG(axis > 0, "Unsupported reduction axis, Supported axis is 0");
- // Calculate output shape and set if empty
- TensorShape output_shape{ input->info()->tensor_shape() };
- output_shape.set(axis, 1);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
-
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op));
unsigned int num_elems_processed_per_iteration = 16 / data_size_from_type(input->info()->data_type());
@@ -131,14 +176,19 @@ void NEReductionOperationKernel::configure(const ITensor *input, ITensor *output
_op = op;
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+ auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis);
- update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+ ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+
+ INEKernel::configure(std::get<1>(win_config));
+}
+
+Status NEReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op));
+ ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis)));
- INEKernel::configure(win);
+ return Status{};
}
void NEReductionOperationKernel::run(const Window &window, const ThreadInfo &info)
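
One detail of the reduction validation above worth noting: when the output info is still empty (total_size() == 0) the type and shape checks on it are skipped and the window check works on an auto-initialized clone, so an unconfigured output is accepted and is only filled in later by configure(). A short sketch under those assumptions, with illustrative shapes; reducing a (128, 64) tensor along axis 0 yields a (1, 64) output.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/NEON/kernels/NEReductionOperationKernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    Status check_reduction_kernel()
    {
        const TensorInfo input(TensorShape(128U, 64U), 1, DataType::F32);

        // Empty output: accepted, configure() would auto-initialize it to (1, 64) F32.
        TensorInfo empty_output{};
        ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(&input, &empty_output, 0, ReductionOperation::SUM_SQUARE));

        // Explicit output with the already-reduced shape.
        const TensorInfo output(TensorShape(1U, 64U), 1, DataType::F32);
        return NEReductionOperationKernel::validate(&input, &output, 0, ReductionOperation::SUM_SQUARE);
    }
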
diff --git a/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp b/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
index fa62483146..d0b80fb1b8 100644
--- a/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
+++ b/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,26 @@ void NEL2NormalizeLayer::configure(ITensor *input, ITensor *output, unsigned int
_sumsq.allocator()->allocate();
}
+Status NEL2NormalizeLayer::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, float epsilon)
+{
+ TensorShape shape(input->tensor_shape());
+
+ // Create intermediate tensor info
+ TensorInfo sum_sq;
+ sum_sq.set_data_type(input->data_type());
+ sum_sq.set_tensor_shape(shape);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperation::validate(input, &sum_sq, axis, ReductionOperation::SUM_SQUARE));
+
+ // Reduce shape on axis (supported axis is 0)
+ shape.set(0, 1);
+ sum_sq.set_tensor_shape(shape);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(NEL2NormalizeLayerKernel::validate(input, &sum_sq, output, axis, epsilon));
+
+ return Status{};
+}
+
void NEL2NormalizeLayer::run()
{
_memory_group.acquire();
diff --git a/src/runtime/NEON/functions/NEReductionOperation.cpp b/src/runtime/NEON/functions/NEReductionOperation.cpp
index f1a9145f74..cd0b42fbe3 100644
--- a/src/runtime/NEON/functions/NEReductionOperation.cpp
+++ b/src/runtime/NEON/functions/NEReductionOperation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,6 +63,13 @@ NEReductionOperation::NEReductionOperation()
{
}
+Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(input, output, axis, op));
+
+ return Status{};
+}
+
void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
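
At the function level, NEReductionOperation::validate() simply forwards to the kernel check, which makes a cheap pre-flight test possible before any tensors are configured. A small sketch, with illustrative shapes and a helper name that is not part of the library:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"

    using namespace arm_compute;

    bool reduction_config_is_valid()
    {
        const TensorInfo input(TensorShape(128U, 64U), 1, DataType::F32);
        const TensorInfo output(TensorShape(1U, 64U), 1, DataType::F32); // axis 0 reduced to one element

        // bool(Status) is true when the configuration would be accepted by configure(),
        // the same convention the new validation tests below rely on.
        return bool(NEReductionOperation::validate(&input, &output, 0 /* axis */, ReductionOperation::SUM_SQUARE));
    }
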
diff --git a/tests/validation/NEON/L2NormalizeLayer.cpp b/tests/validation/NEON/L2NormalizeLayer.cpp
index c0f5920964..f868adea3b 100644
--- a/tests/validation/NEON/L2NormalizeLayer.cpp
+++ b/tests/validation/NEON/L2NormalizeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,6 +49,37 @@ RelativeTolerance<float> tolerance_f32(0.00001f);
TEST_SUITE(NEON)
TEST_SUITE(L2NormalizeLayer)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching shape input/output
+ TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F32
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
+ }),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F16),
+ TensorInfo(TensorShape(256U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::S16),
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Axis", { 0U, 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 1U, 0U })),
+ framework::dataset::make("Expected", { false, false, false, false, false, false, true })),
+ input_info, output_info, axis, expected)
+{
+ bool is_valid = bool(NEL2NormalizeLayer::validate(&input_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false),
+ axis));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
template <typename T>
using NEL2NormalizeLayerFixture = L2NormalizeLayerValidationFixture<Tensor, Accessor, NEL2NormalizeLayer, T>;
diff --git a/tests/validation/NEON/ReductionOperation.cpp b/tests/validation/NEON/ReductionOperation.cpp
index cf603c67ff..c2f2909c66 100644
--- a/tests/validation/NEON/ReductionOperation.cpp
+++ b/tests/validation/NEON/ReductionOperation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,6 +50,36 @@ RelativeTolerance<float> tolerance_f32(0.00001f);
TEST_SUITE(NEON)
TEST_SUITE(ReductionOperation)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F32
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0
+ TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
+ }),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(1U, 64U), 1, DataType::F16),
+ TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 64U), 1, DataType::S16),
+ TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 64U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Axis", { 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 1U, 0U })),
+ framework::dataset::make("Expected", { false, false, false, false, false, true })),
+ input_info, output_info, axis, expected)
+{
+ bool is_valid = bool(NEReductionOperation::validate(&input_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(true),
+ axis,
+ ReductionOperation::SUM_SQUARE));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
template <typename T>
using NEReductionOperationFixture = ReductionOperationValidationFixture<Tensor, Accessor, NEReductionOperation, T>;