Diffstat (limited to 'src')
 src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp (renamed from src/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.cpp) | 133
 src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp | 6
 src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp | 29
 3 files changed, 112 insertions(+), 56 deletions(-)
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
index 65b7087d7e..40abdb1672 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
@@ -42,32 +42,49 @@ namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
- if(is_data_type_quantized(input->data_type()))
+
+ if(bias != nullptr)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS8 && bias->data_type() != DataType::QS8, "Wrong data type for bias");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS16 && bias->data_type() != DataType::QS8, "Wrong data type for bias");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS32 && bias->data_type() != DataType::QS16, "Wrong data type for bias");
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
+
+ if(is_data_type_quantized(input->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS8 && bias->data_type() != DataType::QS8, "Wrong data type for bias");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS16 && bias->data_type() != DataType::QS8, "Wrong data type for bias");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS32 && bias->data_type() != DataType::QS16, "Wrong data type for bias");
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
}
else
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_data_type_quantized(input->data_type()), "Calling output stage kernel with floating point arguments");
}
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, bias);
-
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::QS16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(bias, output);
+ if(is_data_type_quantized(input->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS8 && output->data_type() != DataType::QS8, "Wrong data type for output");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS16 && output->data_type() != DataType::QS8, "Wrong data type for output");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QS32 && output->data_type() != DataType::QS16, "Wrong data type for output");
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output);
}
- ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
-
return Status{};
}
@@ -79,16 +96,35 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
// Configure kernel window
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
+
if(output != nullptr && (output->total_size() != 0))
{
AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- window_changed = update_window_and_padding(win, input_access, output_access, bias_access);
+
+ if(bias == nullptr)
+ {
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ }
+ else
+ {
+ AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
+ window_changed = update_window_and_padding(win, input_access, output_access, bias_access);
+ }
+
output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
}
else
{
- window_changed = update_window_and_padding(win, input_access, bias_access);
+ if(bias == nullptr)
+ {
+ window_changed = update_window_and_padding(win, input_access);
+ }
+ else
+ {
+ AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
+ window_changed = update_window_and_padding(win, input_access, bias_access);
+ }
+
input_access.set_valid_region(win, ValidRegion(Coordinates(), input->tensor_shape()));
}
@@ -199,8 +235,8 @@ inline float16x8_t internal_vqaddq(const float16x8_t &x, const float16x8_t &y)
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-template <typename T1, typename T2, bool in_place>
-void accumulate_bias(ITensor *input, const ITensor *bias, const Window window, ITensor *output)
+template <typename T1, typename T2, bool in_place, bool has_bias>
+void output_stage(ITensor *input, const ITensor *bias, const Window window, ITensor *output)
{
Iterator in(input, window);
@@ -210,10 +246,17 @@ void accumulate_bias(ITensor *input, const ITensor *bias, const Window window, I
{
// Get bias and pointer to input
const auto in_ptr = reinterpret_cast<T1 *>(in.ptr());
- const auto vb = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));
// Accumulate bias
- internal_vst1q(in_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
+ if(has_bias)
+ {
+ const auto vb = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));
+ internal_vst1q(in_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
+ }
+ else
+ {
+ internal_vst1q(in_ptr, internal_vld1q(in_ptr));
+ }
},
in);
}
@@ -225,24 +268,31 @@ void accumulate_bias(ITensor *input, const ITensor *bias, const Window window, I
// Get bias and pointer to input
const auto in_ptr = reinterpret_cast<const T1 *>(in.ptr());
const auto out_ptr = reinterpret_cast<T2 *>(out.ptr());
- const auto vb = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));
// Accumulate bias
- internal_vst1q(out_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
+ if(has_bias)
+ {
+ const auto vb = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));
+ internal_vst1q(out_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
+ }
+ else
+ {
+ internal_vst1q(out_ptr, internal_vld1q(in_ptr));
+ }
},
in, out);
}
}
} // namespace
-NEDirectConvolutionLayerBiasAccumulateKernel::NEDirectConvolutionLayerBiasAccumulateKernel()
+NEDirectConvolutionLayerOutputStageKernel::NEDirectConvolutionLayerOutputStageKernel()
: _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr)
{
}
-void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, const ITensor *bias, ITensor *output)
+void NEDirectConvolutionLayerOutputStageKernel::configure(ITensor *input, const ITensor *bias, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, bias);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
// Auto-initialize output if required
if(output != nullptr)
@@ -252,7 +302,7 @@ void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, con
}
// Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), bias->info(), (output == nullptr) ? nullptr : output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias == nullptr) ? nullptr : bias->info(), (output == nullptr) ? nullptr : output->info()));
_func = nullptr;
_bias = bias;
@@ -260,7 +310,7 @@ void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, con
_output = output;
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), bias->info(), (output == nullptr) ? nullptr : output->info());
+ auto win_config = validate_and_configure_window(input->info(), (bias == nullptr) ? nullptr : bias->info(), (output == nullptr) ? nullptr : output->info());
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
INEKernel::configure(win_config.second);
@@ -269,14 +319,25 @@ void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, con
{
case DataType::QS8:
{
- _func = (output == nullptr) ? &accumulate_bias<qint8_t, qint8_t, true> : &accumulate_bias<qint8_t, qint8_t, false>;
+ if(bias == nullptr)
+ {
+ _func = (output == nullptr) ? &output_stage<qint8_t, qint8_t, true, false> : &output_stage<qint8_t, qint8_t, false, false>;
+ }
+ else
+ {
+ _func = (output == nullptr) ? &output_stage<qint8_t, qint8_t, true, true> : &output_stage<qint8_t, qint8_t, false, true>;
+ }
break;
}
case DataType::QS16:
{
- if(bias->info()->data_type() == DataType::QS8)
+ if(bias != nullptr && bias->info()->data_type() == DataType::QS8)
+ {
+ _func = (output == nullptr) ? &output_stage<qint16_t, qint8_t, true, true> : &output_stage<qint16_t, qint8_t, false, true>;
+ }
+ else if(bias == nullptr)
{
- _func = (output == nullptr) ? &accumulate_bias<qint16_t, qint8_t, true> : &accumulate_bias<qint16_t, qint8_t, false>;
+ _func = (output == nullptr) ? &output_stage<qint16_t, qint8_t, true, false> : &output_stage<qint16_t, qint8_t, false, false>;
}
else
{
@@ -286,19 +347,19 @@ void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, con
}
case DataType::QS32:
{
- _func = (output == nullptr) ? &accumulate_bias<qint32_t, qint16_t, true> : &accumulate_bias<qint32_t, qint16_t, false>;
+ _func = (output == nullptr) ? &output_stage<qint32_t, qint16_t, true, true> : &output_stage<qint32_t, qint16_t, false, true>;
break;
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
{
- _func = (output == nullptr) ? &accumulate_bias<float16_t, float16_t, true> : &accumulate_bias<float16_t, float16_t, false>;
+ _func = (output == nullptr) ? &output_stage<float16_t, float16_t, true, true> : &output_stage<float16_t, float16_t, false, true>;
break;
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
case DataType::F32:
{
- _func = (output == nullptr) ? &accumulate_bias<float, float, true> : &accumulate_bias<float, float, false>;
+ _func = (output == nullptr) ? &output_stage<float, float, true, true> : &output_stage<float, float, false, true>;
break;
}
default:
@@ -309,7 +370,7 @@ void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, con
}
}
-Status NEDirectConvolutionLayerBiasAccumulateKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output)
+Status NEDirectConvolutionLayerOutputStageKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), bias->clone().get(), output == nullptr ? nullptr : output->clone().get()).first);
@@ -317,7 +378,7 @@ Status NEDirectConvolutionLayerBiasAccumulateKernel::validate(const ITensorInfo
return Status{};
}
-void NEDirectConvolutionLayerBiasAccumulateKernel::run(const Window &window, const ThreadInfo &info)
+void NEDirectConvolutionLayerOutputStageKernel::run(const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
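The substantive change in this kernel is the extra has_bias template parameter: the output stage can now run without a bias tensor, in which case it simply loads the accumulator values and stores them back, down-casting when a separate output tensor is given. As a rough scalar illustration of that dispatch — a sketch only, with made-up names, ignoring the NEON vectorization, saturation and fixed-point scaling that internal_vqaddq/internal_vld1q/internal_vst1q handle in the real kernel:

    #include <cstddef>

    // TIn: accumulator type (e.g. qint32_t), TOut: bias/output type (e.g. qint16_t).
    template <typename TIn, typename TOut, bool in_place, bool has_bias>
    void output_stage_sketch(TIn *acc, const TOut *bias, TOut *out, std::size_t n, std::size_t channel)
    {
        for(std::size_t i = 0; i < n; ++i)
        {
            TIn v = acc[i];
            if(has_bias)
            {
                v += static_cast<TIn>(bias[channel]); // add the per-channel bias
            }
            if(in_place)
            {
                acc[i] = v;                    // output == nullptr: write back into the accumulator
            }
            else
            {
                out[i] = static_cast<TOut>(v); // otherwise: down-cast into the separate output tensor
            }
        }
    }

The instantiations selected in configure() map directly onto the <in_place, has_bias> combinations of this signature, with the no-bias variants only reachable for quantized inputs (the validate step rejects a missing bias for floating-point data).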
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 4575c7af9d..298101a09d 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -32,7 +32,7 @@
using namespace arm_compute;
NEDepthwiseConvolutionLayer3x3::NEDepthwiseConvolutionLayer3x3()
- : _kernel(), _bias_kernel(), _border_handler(), _has_bias(false)
+ : _kernel(), _output_stage_kernel(), _border_handler(), _has_bias(false)
{
}
@@ -46,7 +46,7 @@ void NEDepthwiseConvolutionLayer3x3::configure(ITensor *input, const ITensor *we
_border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
if(biases != nullptr)
{
- _bias_kernel.configure(output, biases);
+ _output_stage_kernel.configure(output, biases);
_has_bias = true;
}
}
@@ -57,7 +57,7 @@ void NEDepthwiseConvolutionLayer3x3::run()
NEScheduler::get().schedule(&_kernel, Window::DimX);
if(_has_bias)
{
- NEScheduler::get().schedule(&_bias_kernel, Window::DimX);
+ NEScheduler::get().schedule(&_output_stage_kernel, Window::DimX);
}
}
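In the depthwise function the change is essentially the rename from _bias_kernel to _output_stage_kernel: the kernel is still configured only when a bias is passed in and scheduled only when _has_bias is set. A condensed sketch of that configure-once / run-conditionally pattern (hypothetical stand-in types, not the actual arm_compute classes):

    // Stand-ins for the kernel and function classes, showing the control flow only.
    struct OutputStageKernelSketch
    {
        void configure(float * /*output*/, const float * /*bias*/) { /* record tensors, compute window */ }
        void run() { /* add the per-channel bias to the output in place */ }
    };

    struct DepthwiseLayerSketch
    {
        OutputStageKernelSketch _output_stage_kernel{};
        bool                    _has_bias{ false };

        void configure(float *output, const float *bias)
        {
            if(bias != nullptr)
            {
                _output_stage_kernel.configure(output, bias); // only set up when a bias exists
                _has_bias = true;
            }
        }

        void run()
        {
            if(_has_bias)
            {
                _output_stage_kernel.run(); // skipped entirely for bias-less layers
            }
        }
    };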
diff --git a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
index ef5d987832..c26c99a0f8 100644
--- a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
@@ -34,7 +34,7 @@
using namespace arm_compute;
NEDirectConvolutionLayer::NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _accumulate_bias_kernel(), _conv_kernel(), _input_border_handler(), _accumulator(), _has_bias(false)
+ : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _accumulator(), _has_bias(false), _is_fixed_point(false)
{
}
@@ -50,17 +50,16 @@ void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights,
_has_bias = (bias != nullptr);
// Allocate the intermediate accumulator tensor in case of fixed point input
- if(is_data_type_fixed_point(input->info()->data_type()))
+ _is_fixed_point = is_data_type_fixed_point(input->info()->data_type());
+ if(_is_fixed_point)
{
const DataType promoted_dt = (input->info()->data_type() == DataType::QS8) ? DataType::QS16 : DataType::QS32;
_accumulator.allocator()->init(TensorInfo(output->info()->tensor_shape(), 1, promoted_dt, output->info()->fixed_point_position()));
_memory_group.manage(&_accumulator);
_conv_kernel.configure(input, weights, &_accumulator, conv_info);
- // TODO (COMPMID-746): Fix accumulate biases to just down-cast when no bias is provided
- if(_has_bias)
- {
- _accumulate_bias_kernel.configure(&_accumulator, bias, output);
- }
+
+ // Configure the output stage even without a bias: the fixed-point accumulator still needs to be down-scaled to the output type
+ _output_stage_kernel.configure(&_accumulator, bias, output);
_accumulator.allocator()->allocate();
}
else
@@ -68,7 +67,7 @@ void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights,
_conv_kernel.configure(input, weights, output, conv_info);
if(_has_bias)
{
- _accumulate_bias_kernel.configure(output, bias);
+ _output_stage_kernel.configure(output, bias);
}
}
@@ -91,20 +90,17 @@ Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITenso
// Validate Convolution kernel
ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerKernel::validate(input, weights, &accumulator, conv_info));
- // Validate bias
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias == nullptr) && is_data_type_fixed_point(data_type),
- "Biases should be provided for fixed point inputs");
if(bias != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
"Biases size and number of input feature maps should match");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1, "Biases should be one dimensional");
-
- // Validate bias kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerBiasAccumulateKernel::validate(&accumulator, bias, output));
}
+ // Validate bias kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, bias, output));
+
return Status{};
}
@@ -115,10 +111,9 @@ void NEDirectConvolutionLayer::run()
_memory_group.acquire();
NEScheduler::get().schedule(&_conv_kernel, Window::DimZ);
- if(_has_bias)
+ if(_has_bias || _is_fixed_point)
{
- NEScheduler::get().schedule(&_accumulate_bias_kernel, Window::DimY);
+ NEScheduler::get().schedule(&_output_stage_kernel, Window::DimY);
}
-
_memory_group.release();
}
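Taken together, the direct-convolution path can now handle fixed-point inputs without a bias: the convolution writes into a promoted accumulator (QS8 -> QS16, QS16 -> QS32), the output stage is configured unconditionally for fixed point, and run() schedules it whenever _has_bias || _is_fixed_point so the accumulator is always down-scaled. A rough usage sketch of what this enables — assuming the configure(input, weights, bias, output, conv_info) signature of this version of the library; the shapes, data type and fixed-point position below are made up for illustration:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src, weights, dst;

        // QS8 tensors with fixed-point position 4: 8x8x3 input, four 3x3x3 kernels, 6x6x4 output.
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QS8, 4));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::QS8, 4));
        dst.allocator()->init(TensorInfo(TensorShape(6U, 6U, 4U), 1, DataType::QS8, 4));

        NEDirectConvolutionLayer conv;
        // No bias: before this change the fixed-point path rejected a null bias;
        // now the output stage still runs to down-scale the promoted accumulator.
        conv.configure(&src, &weights, /*bias=*/nullptr, &dst, PadStrideInfo(1, 1, 0, 0));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();

        conv.run();
        return 0;
    }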