author     Georgios Pinitas <georgios.pinitas@arm.com>    2019-07-30 15:09:10 +0100
committer  Michele Di Giorgio <michele.digiorgio@arm.com> 2019-07-31 14:39:12 +0000
commit     4d600c728a75792c5479b54114ec11c6d8fea61a (patch)
tree       8d0dc48d651c10f147037948da3c7e419871a3bf /src
parent     a231ea6dcbee90c0079cad1645783f0311a02a6c (diff)
download   ComputeLibrary-4d600c728a75792c5479b54114ec11c6d8fea61a.tar.gz
COMPMID-2500: Report error in Int8 Conv2d if multiplier > 1.
Change-Id: I7d0263eddfb4f9cf0145e94b35d5f9e18737cd2d
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1653
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
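
For context on why the multiplier matters: the requantization multiplier of a quantized convolution is derived from the tensor scales as input_scale * weights_scale / output_scale, and the NEON output stage encodes it as a 32-bit fixed-point value plus a right shift, a representation that only exists for multipliers below 1. calculate_quantized_multiplier_less_than_one signals the unrepresentable case through its return Status, which validate_mm previously discarded; this patch propagates it. Below is a minimal standalone sketch of that gemmlowp-style decomposition, with illustrative names and an exception in place of the library's Status type; it is not the library's implementation.

#include <cmath>
#include <cstdint>
#include <stdexcept>

// Illustrative decomposition of a real multiplier in [0, 1) into a Q0.31
// fixed-point multiplier and a right shift. Names and error handling are
// assumptions for this sketch, not ComputeLibrary code.
void quantize_multiplier_smaller_than_one(double multiplier, int32_t *quantized_multiplier, int *right_shift)
{
    if(multiplier < 0.0 || multiplier >= 1.0)
    {
        // The case this commit surfaces as an error instead of ignoring it.
        throw std::runtime_error("requantization multiplier must lie in [0, 1)");
    }
    if(multiplier == 0.0)
    {
        *quantized_multiplier = 0;
        *right_shift          = 0;
        return;
    }
    int          exponent = 0;
    const double q        = std::frexp(multiplier, &exponent); // q in [0.5, 1), multiplier = q * 2^exponent
    *right_shift          = -exponent;                         // exponent <= 0 because multiplier < 1
    int64_t q_fixed       = static_cast<int64_t>(std::round(q * (1LL << 31)));
    if(q_fixed == (1LL << 31)) // rounding pushed q up to exactly 1.0; halve and compensate the shift
    {
        q_fixed /= 2;
        --(*right_shift);
    }
    *quantized_multiplier = static_cast<int32_t>(q_fixed);
}

With scales such as input 0.5, weights 0.5 and output 0.1 the multiplier is 2.5, the decomposition above does not exist, and the validate path now reports this instead of proceeding with an undefined output stage.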
Diffstat (limited to 'src')
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index bd46944f7a..e94c8933ae 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -187,7 +187,7 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier;
int output_shift;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+ ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift));
// Merge activation with output stage
int min_activation = 0;
@@ -492,6 +492,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
// Output tensor auto inizialization if not yet initialized
ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayerReshapeWeights::validate(weights, biases_to_use, nullptr));
weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, (append_bias && !skip_im2col)), 1, data_type);
+ weights_reshaped_info.set_quantization_info(weights->quantization_info());
weights_to_use = &weights_reshaped_info;
if(!skip_im2col)
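
Seen from the caller's side, NEGEMMConvolutionLayer::validate now returns a failing Status when the quantization scales produce a multiplier of one or more, and the second hunk makes the reshaped-weights TensorInfo carry the original weights' quantization info so that downstream checks see the real weights scale. A hedged caller-side sketch follows; the shapes and scales are illustrative assumptions, not taken from this commit, and the validate() call relies on the function's trailing default arguments.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"

#include <iostream>

using namespace arm_compute;

int main()
{
    // Illustrative QASYMM8 tensors; the scales are deliberately chosen so that
    // input_scale * weights_scale / output_scale = 0.5 * 0.5 / 0.1 = 2.5 > 1.
    const TensorInfo input_info(TensorShape(16U, 16U, 3U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10));
    const TensorInfo weights_info(TensorShape(3U, 3U, 3U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10));
    const TensorInfo biases_info(TensorShape(8U), 1, DataType::S32);
    const TensorInfo output_info(TensorShape(14U, 14U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 10));

    // With this patch the unrepresentable multiplier comes back as a failing
    // Status here instead of being silently dropped inside validate_mm.
    const Status status = NEGEMMConvolutionLayer::validate(&input_info, &weights_info, &biases_info,
                                                           &output_info, PadStrideInfo(1, 1, 0, 0));
    if(!bool(status))
    {
        std::cout << "Validation failed: " << status.error_description() << std::endl;
    }
    return 0;
}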