author    Giorgio Arena <giorgio.arena@arm.com>  2019-10-15 11:09:33 +0100
committer Giorgio Arena <giorgio.arena@arm.com>  2019-10-21 10:14:20 +0000
commit    d93e263e70e3101422402c95946e520fef34c4c7 (patch)
tree      f79d3b325ed6881fb9252cb7ee0b7573739e00be /tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
parent    ab5b1a279284bed350d3bb75f3d9d3aec6edca0e (diff)
download  ComputeLibrary-d93e263e70e3101422402c95946e520fef34c4c7.tar.gz
COMPMID-2708 NEDepthwiseConvolution Generic: support for QUANT8_PER_CHANNEL_SYMM
COMPMID-2470 Implement a new and generic depthwise convolution for NEON QASYMM8 NHWC
COMPMID-2477 Enable FP16 data type for the new generic convolution on NEON for NHWC
COMPMID-2625 Remove old implementation files for the generic NEDepthwiseConvolution

Change-Id: I8f6deda4fc69dd7e472fba3228b1ed5dad172f3e
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2094
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
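For context (this sketch is not part of the commit and its scale values are made up): a QUANT8_PER_CHANNEL_SYMM weight tensor carries one scale per output channel with a fixed zero point of 0, so each quantized weight dequantizes as real = scale[channel] * q. A minimal standalone illustration of that mapping:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Sketch of per-channel symmetric (QUANT8_PER_CHANNEL_SYMM) dequantization:
    // one scale per output channel, zero point fixed at 0.
    // The scale and weight values below are invented for illustration only.
    int main()
    {
        const std::vector<float>  scales = { 0.02f, 0.5f, 0.13f }; // one scale per channel
        const std::vector<int8_t> q      = { -64, 25, 100 };       // one quantized weight per channel

        for(size_t c = 0; c < scales.size(); ++c)
        {
            // Symmetric quantization: no zero-point offset to subtract.
            const float real = scales[c] * static_cast<float>(q[c]);
            std::cout << "channel " << c << ": " << real << '\n';
        }
        return 0;
    }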
Diffstat (limited to 'tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 85930eb95e..f909885245 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -449,12 +449,22 @@ class DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture : public Dep
public:
template <typename...>
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
- QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
{
+ const float out_scale = output_quantization_info.uniform().scale;
+ const float in_scale = input_quantization_info.uniform().scale;
+
+ std::vector<float> weights_scales{};
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<> dis(0.01f, out_scale / in_scale);
+ for(size_t i = 0; i < in_shape.z() * depth_multiplier; ++i)
+ {
+ weights_scales.push_back(dis(gen));
+ }
+
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
input_data_type, weights_data_type,
- input_quantization_info, weights_quantization_info, output_quantization_info,
+ input_quantization_info, QuantizationInfo(weights_scales), output_quantization_info,
data_layout, act_info);
}
};
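The hunk above drops the explicit weights QuantizationInfo parameter and instead generates one random weight scale per output channel (in_shape.z() * depth_multiplier of them) inside the fixture. As a minimal, self-contained sketch of the same idea outside the fixture (library->seed() and QuantizationInfo are replaced here with a plain seed and a std::vector<float>; the upper bound out_scale / in_scale presumably keeps each requantization multiplier in_scale * w_scale / out_scale below 1):

    #include <cstddef>
    #include <iostream>
    #include <random>
    #include <vector>

    // Sketch (not Compute Library code): draw one symmetric weight scale per output
    // channel from (0.01, out_scale / in_scale), mirroring the fixture's distribution.
    std::vector<float> make_per_channel_weight_scales(size_t num_channels, float in_scale, float out_scale, unsigned int seed)
    {
        std::mt19937                     gen(seed);
        std::uniform_real_distribution<> dis(0.01f, out_scale / in_scale);

        std::vector<float> weights_scales{};
        weights_scales.reserve(num_channels);
        for(size_t i = 0; i < num_channels; ++i)
        {
            weights_scales.push_back(static_cast<float>(dis(gen)));
        }
        return weights_scales;
    }

    int main()
    {
        // Example: 8 input channels with a depth multiplier of 2 -> 16 weight scales.
        const auto scales = make_per_channel_weight_scales(8 * 2, 0.5f, 1.0f, 0);
        for(float s : scales)
        {
            std::cout << s << '\n';
        }
        return 0;
    }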