author     giuros01 <giuseppe.rossini@arm.com>         2018-11-20 18:34:46 +0000
committer  Giuseppe Rossini <giuseppe.rossini@arm.com> 2018-11-30 18:00:25 +0000
commit     164a2727d3bbce0e575d24b7db787c85e2e2c203 (patch)
tree       983fc1f519032ac9a056e19f87e32597ca1874a1 /src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
parent     7930db48e12dd3a14c1971f41f5b83527efea281 (diff)
download   ComputeLibrary-164a2727d3bbce0e575d24b7db787c85e2e2c203.tar.gz
COMPMID-1717: CL: Implement Maximum, Minimum, SquaredDifference
Change-Id: Ice653e48211053bd3cd20a693bd76de6b4efc370
Reviewed-on: https://review.mlplatform.org/270
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp')
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 16
1 file changed, 8 insertions(+), 8 deletions(-)
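Note on the hunks below: this patch generalises the CL element-wise arithmetic kernels, so the bias-addition path in CLGEMMConvolutionLayer now selects its operation through an ArithmeticOperation enum instead of going through the dedicated CLArithmeticAdditionKernel. A minimal sketch of the two updated call sites, based only on the signatures visible in this diff (the declared type of _add_bias_kernel lives in the header, which is not part of this page, and is assumed here to be CLSaturatedArithmeticOperationKernel):

// configure(): in-place saturated element-wise addition of the bias,
// i.e. output = saturate(output + biases); the enum selects the operation.
_add_bias_kernel.configure(ArithmeticOperation::ADD, output, biases, output, ConvertPolicy::SATURATE);

// validate(): the static check mirrors the configure call and is now routed
// through the generic saturated arithmetic-operation kernel.
ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, output, biases, output, ConvertPolicy::SATURATE));

The Maximum, Minimum and SquaredDifference operations named in the commit title would select a different ArithmeticOperation value against the same kernel; only the ADD path appears in this file.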
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 4694aa7f37..3a8b1a5891 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -242,7 +242,7 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     else if(_append_bias)
     {
         // Configure add bias kernel
-        _add_bias_kernel.configure(output, biases, output, ConvertPolicy::SATURATE);
+        _add_bias_kernel.configure(ArithmeticOperation::ADD, output, biases, output, ConvertPolicy::SATURATE);
     }
 
     // Create GEMM output tensor
@@ -276,9 +276,9 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     {
         const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
 
-        const float multiplier = (input->info()->quantization_info().scale * weights->info()->quantization_info().scale) / output_quant_info.scale;
-        int output_multiplier = 0;
-        int output_shift = 0;
+        const float multiplier = (input->info()->quantization_info().scale * weights->info()->quantization_info().scale) / output_quant_info.scale;
+        int output_multiplier = 0;
+        int output_shift = 0;
         quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
 
         int min_activation = 0;
@@ -432,7 +432,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     else if(append_bias)
     {
         // Validate add bias kernel
-        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(output, biases, output, ConvertPolicy::SATURATE));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, output, biases, output, ConvertPolicy::SATURATE));
     }
 
     // Create GEMM output tensor
@@ -459,9 +459,9 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     {
         const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input->quantization_info() : output->quantization_info();
 
-        const float multiplier = (input->quantization_info().scale * weights->quantization_info().scale) / output_quant_info.scale;
-        int output_multiplier = 0;
-        int output_shift = 0;
+        const float multiplier = (input->quantization_info().scale * weights->quantization_info().scale) / output_quant_info.scale;
+        int output_multiplier = 0;
+        int output_shift = 0;
         ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift));
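Side note on the two quantization hunks above: the requantisation factor is multiplier = (input_scale * weights_scale) / output_scale, and calculate_quantized_multiplier_less_than_one() splits it into an integer multiplier and a right shift so the quantized GEMM output stage can stay in fixed point. The standalone sketch below illustrates that decomposition in the usual gemmlowp style; it is not the library's implementation, it assumes 0 < multiplier < 1 as the function name implies, and the example scale values are made up.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Illustrative decomposition: find quantized_multiplier (Q0.31 fixed point) and a
// right shift so that multiplier ~= quantized_multiplier * 2^-31 * 2^-right_shift.
void quantize_multiplier_less_than_one(double multiplier, std::int32_t *quantized_multiplier, int *right_shift)
{
    int exponent = 0;
    // frexp(): multiplier = q * 2^exponent with q in [0.5, 1); exponent <= 0 when multiplier < 1.
    const double q = std::frexp(multiplier, &exponent);
    *right_shift   = -exponent;
    auto q_fixed   = static_cast<std::int64_t>(std::round(q * (1ll << 31)));
    if(q_fixed == (1ll << 31)) // rounding pushed q up to 1.0: renormalise
    {
        q_fixed /= 2;
        --(*right_shift);
    }
    *quantized_multiplier = static_cast<std::int32_t>(q_fixed);
}

int main()
{
    // Hypothetical scales, not taken from the patch: input 0.5, weights 0.25, output 2.0.
    const double multiplier        = (0.5 * 0.25) / 2.0;
    std::int32_t output_multiplier = 0;
    int          output_shift      = 0;
    quantize_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
    std::printf("multiplier=%f -> fixed=%d, shift=%d\n", multiplier, static_cast<int>(output_multiplier), output_shift);
    return 0;
}

For the 0.0625 example above this prints output_multiplier = 1073741824 (0.5 in Q0.31) and output_shift = 3, i.e. 1073741824 * 2^-31 * 2^-3 = 0.0625.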