diff options
author | Giorgio Arena <giorgio.arena@arm.com> | 2019-10-15 11:09:33 +0100 |
---|---|---|
committer | Giorgio Arena <giorgio.arena@arm.com> | 2019-10-21 10:14:20 +0000 |
commit | d93e263e70e3101422402c95946e520fef34c4c7 (patch) | |
tree | f79d3b325ed6881fb9252cb7ee0b7573739e00be /tests/validation/reference/DepthwiseConvolutionLayer.cpp | |
parent | ab5b1a279284bed350d3bb75f3d9d3aec6edca0e (diff) | |
download | ComputeLibrary-d93e263e70e3101422402c95946e520fef34c4c7.tar.gz |
COMPMID-2708 NEDepthwiseConvolution Generic: support for QUANT8_PER_CHANNEL_SYMM
COMPMID-2470 Implement a new and generic depthwise convolution for NEON QASYMM8 NHWC
COMPMID-2477 Enable FP16 data type for the new generic convolution on NEON for NHWC
COMPMID-2625 Remove old implementation files for the generic NEDepthwiseConvolution
Change-Id: I8f6deda4fc69dd7e472fba3228b1ed5dad172f3e
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2094
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/reference/DepthwiseConvolutionLayer.cpp')
-rw-r--r-- | tests/validation/reference/DepthwiseConvolutionLayer.cpp | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp index 7458f815b8..608093d381 100644 --- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp +++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp @@ -188,17 +188,17 @@ SimpleTensor<T> depthwise_convolution_quantized(const SimpleTensor<T> &src, cons { for(int z = 0; z < input_depth; ++z) { - int output_multiplier = 0; - int output_shift = 0; - const float weights_scale = (is_quantized_per_channel) ? weights_scale_vec[z] : weights_scale_vec[0]; - const float multiplier = input_scale * weights_scale / output_scale; - arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); - for(unsigned int m = 0; m < depth_multiplier; ++m) { const int out_z = z * depth_multiplier + m; const int32_t bias_val = *static_cast<const int32_t *>(biases(Coordinates(out_z))); + int output_multiplier = 0; + int output_shift = 0; + const float weights_scale = (is_quantized_per_channel) ? weights_scale_vec[out_z] : weights_scale_vec[0]; + const float multiplier = input_scale * weights_scale / output_scale; + arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); + for(int y = minimum_y; y <= minimum_y + maximum_y; y += conv_info.stride().second) { for(int x = minimum_x; x <= minimum_x + maximum_x; x += conv_info.stride().first) |