diff options
author | Ryan OShea <ryan.oshea3@arm.com> | 2023-07-06 11:41:25 +0100 |
---|---|---|
committer | ryan.oshea3 <ryan.oshea3@arm.com> | 2023-07-10 16:18:42 +0000 |
commit | f183acdb95ed2bc3b14365d4b5cb438ca8a3649f (patch) | |
tree | 7dcf670d34aee267120a4515c57841847d4eb43c | |
parent | 2ea403d130db0d2853d5c43c29b5112893efc2bf (diff) | |
download | armnn-f183acdb95ed2bc3b14365d4b5cb438ca8a3649f.tar.gz |
IVGCVSW-7844 Remove unnecessary warnings for certain models
* Remove warning on constant layer optimization being run on layer without
constant tensor
* Remove warning on bias quantization scale not being equal to
(InputScale x WeightScale)
Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: I87e97127dc0fd45812bfada1c7dfcc3d5f5cdecc
-rw-r--r-- | src/armnn/layers/LayerWithParameters.hpp | 9 | ||||
-rw-r--r-- | src/backends/backendsCommon/WorkloadData.cpp | 44 |
2 files changed, 9 insertions, 44 deletions
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp index 4e7cd45125..89525aa351 100644 --- a/src/armnn/layers/LayerWithParameters.hpp +++ b/src/armnn/layers/LayerWithParameters.hpp @@ -72,17 +72,8 @@ protected: } } } - if (tensors.empty()) - { - const std::string warningMessage{"GetConnectedConstantAsInputTensors() called on Layer with no " - "connected Constants as Input Tensors."}; - ARMNN_LOG(warning) << warningMessage; - } return tensors; } }; - - - } // namespace diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 6cde89c2e1..a26aaf490b 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -179,23 +179,9 @@ void ValidateTensorQuantizationSpace(const TensorInfo& first, //--------------------------------------------------------------- void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, - const TensorInfo& inputTensorInfo, const TensorInfo& weightsTensorInfo, const std::string& descName) { - // Helper lambda function to validate a single bias quantization scale value - auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void - { - constexpr float tolerance = 0.0001f; - if (std::abs(biasScale - expectedScale) > tolerance) - { - // Print the float values with extra precision to see very small differences - ARMNN_LOG(warning) << std::setprecision(6) << descName << ": Expected " << expectedScale << - " for bias quantization scale (product of input and weight scales), but got " << - biasScale << ". 
Using scale provided."; - } - }; - if (biasTensor.GetQuantizationOffset() != 0) { throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " + @@ -216,18 +202,6 @@ void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, << ", biases=" << biasScales.size(); throw InvalidArgumentException(msg.str(), CHECK_LOCATION()); } - - for (size_t i = 0ul; i < biasScales.size(); ++i) - { - const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i]; - VerifyBiasQuantizationScale(biasScales[i], expectedScale); - } - } - else - { - // Validate per-tensor quantization scale - const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale(); - VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale); } } @@ -1086,7 +1060,7 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c { TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2]; // Validates type and quantization values. 
- ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName); + ValidateBiasTensorQuantization(biasTensorInfo, weightTensorInfo, descriptorName); ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias"); ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias"); } @@ -1299,7 +1273,7 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value(); ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias"); - ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName); + ValidateBiasTensorQuantization(biasTensorInfo, weightTensorInfo, descriptorName); } if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 ) @@ -1374,7 +1348,7 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value(); ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias"); - ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName); + ValidateBiasTensorQuantization(biasTensorInfo, weightTensorInfo, descriptorName); } if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 || m_Parameters.m_StrideZ <= 0 ) @@ -1487,7 +1461,7 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]); const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value(); - ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName); + ValidateBiasTensorQuantization(biasTensorInfo, weightTensorInfo, descriptorName); ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), 
descriptorName, "bias"); } ValidatePerAxisQuantization(inputTensorInfo, @@ -3105,7 +3079,7 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value(); ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias"); - ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName); + ValidateBiasTensorQuantization(biasTensorInfo, weightTensorInfo, descriptorName); } ValidatePerAxisQuantization(inputTensorInfo, @@ -3621,10 +3595,10 @@ void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co "inputGateBias", "outputGateBias"); // Validate bias tensor quantization info - ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName); - ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName); - ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName); - ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName); + ValidateBiasTensorQuantization(inputGateBiasInfo, inputToInputWeightsInfo, descriptorName); + ValidateBiasTensorQuantization(forgetGateBiasInfo, inputToInputWeightsInfo, descriptorName); + ValidateBiasTensorQuantization(cellBiasInfo, inputToInputWeightsInfo, descriptorName); + ValidateBiasTensorQuantization(outputGateBiasInfo, inputToInputWeightsInfo, descriptorName); } void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const |