author     Michalis Spyrou <michalis.spyrou@arm.com>    2017-11-30 14:25:57 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:42:17 +0000
commit     afa5d817b1d083837cd7ea30d32f845d82620c12 (patch)
tree       1ca2a27ab7108b7137b96fc1547a8b5ac5d9c8f7 /src/runtime
parent     631c41a4e3645a948b0f597caa77e8fa91ca0efc (diff)
COMPMID-617 Add validation methods to Kernels
- NEActivationLayer
- NESoftmax
- NEDirectConvolutionLayer
- NENormalizationLayer
- NEPoolingLayer

Change-Id: Ib279f1c1b7f9247679b0d6593aed7393da8fe87b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/111335
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/NEON/functions/NEActivationLayer.cpp        |  5
-rw-r--r--  src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp | 24
-rw-r--r--  src/runtime/NEON/functions/NENormalizationLayer.cpp     | 13
-rw-r--r--  src/runtime/NEON/functions/NEPoolingLayer.cpp           |  5
-rw-r--r--  src/runtime/NEON/functions/NESoftmaxLayer.cpp           | 19
5 files changed, 64 insertions(+), 2 deletions(-)
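The static validate() entry points added by this patch let a caller check a proposed configuration against ITensorInfo descriptors before allocating tensors or calling configure(). A minimal usage sketch for the activation layer follows; the shapes, data type and the helper name can_run_relu are illustrative only and not part of the patch:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"

using namespace arm_compute;

bool can_run_relu()
{
    // Describe the tensors without allocating any backing memory (shapes are made up).
    const TensorInfo src_info(TensorShape(224U, 224U, 3U), 1, DataType::F32);
    const TensorInfo dst_info(TensorShape(224U, 224U, 3U), 1, DataType::F32);

    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

    // The static validate() reports whether the corresponding configure() call would succeed.
    const Status status = NEActivationLayer::validate(&src_info, &dst_info, act_info);
    return bool(status);
}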
diff --git a/src/runtime/NEON/functions/NEActivationLayer.cpp b/src/runtime/NEON/functions/NEActivationLayer.cpp
index 57a1738f85..cdf1b54659 100644
--- a/src/runtime/NEON/functions/NEActivationLayer.cpp
+++ b/src/runtime/NEON/functions/NEActivationLayer.cpp
@@ -34,3 +34,8 @@ void NEActivationLayer::configure(ITensor *input, ITensor *output, ActivationLay
k->configure(input, output, activation_info);
_kernel = std::move(k);
}
+
+Status NEActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+{
+ return NEActivationLayerKernel::validate(input, output, act_info);
+}
diff --git a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
index 52a4cc158f..2eabe459a5 100644
--- a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
@@ -85,6 +85,30 @@ void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights,
_input_border_handler.configure(input, _conv_kernel.border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
}
+Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, bias, output);
+
+ DataType data_type = output->data_type();
+ if(is_data_type_fixed_point(data_type))
+ {
+ // Promote data type in case of fixed point
+ data_type = ((data_type == DataType::QS8) ? DataType::QS16 : DataType::QS32);
+ }
+ TensorInfo accumulator(output->clone()->set_is_resizable(true).reset_padding().set_data_type(data_type));
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
+ "Biases size and number of input feature maps should match");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1,
+ "Biases should be one dimensional");
+
+ ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerKernel::validate(input, weights, &accumulator, conv_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerBiasAccumulateKernel::validate(&accumulator, bias, output));
+
+ return Status{};
+}
+
void NEDirectConvolutionLayer::run()
{
NEScheduler::get().schedule(&_input_border_handler, Window::DimZ);
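The composite validate() added above chains the per-kernel checks so that the first failing check is returned to the caller as a Status. As a rough approximation of that return-on-error pattern (this is a sketch, not the library's actual ARM_COMPUTE_RETURN_ON_ERROR definition, which lives in the arm_compute error-handling headers and may differ in detail):

// Sketch only: approximates what a return-on-error helper macro does.
// Evaluates an expression yielding arm_compute::Status and returns it
// from the enclosing function if it carries an error.
#define SKETCH_RETURN_ON_ERROR(expr)               \
    do                                             \
    {                                              \
        const arm_compute::Status _status = (expr); \
        if(!bool(_status))                         \
        {                                          \
            return _status;                        \
        }                                          \
    } while(false)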
diff --git a/src/runtime/NEON/functions/NENormalizationLayer.cpp b/src/runtime/NEON/functions/NENormalizationLayer.cpp
index da4314b5ed..af98ac1f17 100644
--- a/src/runtime/NEON/functions/NENormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NENormalizationLayer.cpp
@@ -39,7 +39,7 @@ NENormalizationLayer::NENormalizationLayer(std::shared_ptr<IMemoryManager> memor
void NENormalizationLayer::configure(const ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
{
- ARM_COMPUTE_ERROR_ON(input == nullptr);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
TensorInfo tensor_info(input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
_input_squared.allocator()->init(tensor_info);
@@ -56,6 +56,17 @@ void NENormalizationLayer::configure(const ITensor *input, ITensor *output, cons
_input_squared.allocator()->allocate();
}
+Status NENormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const NormalizationLayerInfo &norm_info)
+{
+ // Perform validation step
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(NENormalizationLayerKernel::validate(input, input, output, norm_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(input, input, output, 1.0f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
+
+ return Status{};
+}
+
void NENormalizationLayer::run()
{
_memory_group.acquire();
diff --git a/src/runtime/NEON/functions/NEPoolingLayer.cpp b/src/runtime/NEON/functions/NEPoolingLayer.cpp
index f8a85b9897..530c7fca4a 100644
--- a/src/runtime/NEON/functions/NEPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEPoolingLayer.cpp
@@ -48,6 +48,11 @@ void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLay
_border_handler.configure(input, _pooling_layer_kernel.border_size(), border_mode, PixelValue(static_cast<float>(0.f)));
}
+Status NEPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
+{
+ return NEPoolingLayerKernel::validate(input, output, pool_info);
+}
+
void NEPoolingLayer::run()
{
// Fill border
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 84ecfdaf33..8e6773c5b1 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -38,7 +38,7 @@ NESoftmaxLayer::NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
void NESoftmaxLayer::configure(ITensor *input, ITensor *output, float beta)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Create intermediate tensors shapes
TensorInfo tensor_info_tmp(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
@@ -67,6 +67,23 @@ void NESoftmaxLayer::configure(ITensor *input, ITensor *output, float beta)
_sum.allocator()->allocate();
}
+Status NESoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
+{
+ // Perform validation step
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+
+ TensorShape max_sum_shape = input->tensor_shape();
+ max_sum_shape.set(0, 1);
+
+ TensorInfo tensor_info_max_sum(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(max_sum_shape));
+
+ ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DMaxKernel::validate(input, &tensor_info_max_sum));
+ ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DShiftExpSumKernel::validate(input, &tensor_info_max_sum, input, &tensor_info_max_sum, beta));
+ ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DNormKernel::validate(input, &tensor_info_max_sum, output));
+
+ return Status{};
+}
+
void NESoftmaxLayer::run()
{
_memory_group.acquire();