about summary refs log tree commit diff
diff options
context:
space:
mode:
authorFinnWilliamsArm <Finn.Williams@arm.com>2019-07-18 11:15:42 +0100
committerFinnWilliamsArm <Finn.Williams@arm.com>2019-07-19 09:19:22 +0100
commit598950d611304ffeb9d57a5b28a13b0ddd629026 (patch)
tree7b6d49d6ff7e3821f7dd8e2b083124d22cac926e
parent37e68686a4889b6e79b035356202903647671f13 (diff)
downloadarmnn-598950d611304ffeb9d57a5b28a13b0ddd629026.tar.gz
IVGCVSW-3453 Fix VTS quant_output_multiplier_gt_1 test failures
* Remove ValidateTensorQuantizationMultiplier
* Update CL pin to COMPMID-2336

Signed-off-by: FinnWilliamsArm <Finn.Williams@arm.com>
Change-Id: Iaece5b564134f7ec91f82cb246bcb10ad455999d
-rwxr-xr-x  scripts/get_compute_library.sh            3
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp  35
2 files changed, 2 insertions(+), 36 deletions(-)
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 9c45f5d4e9..8a24136472 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,8 @@ CMD=$( basename $0 )
#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_05" # Release 19.05
#
# For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="cd0b8b521eb309af8cb84e1a1b031280b027c809" #COMPMID-2236: Move assembly implementation interfaces to src folder
+DEFAULT_CLFRAMEWORKREVISION="ff2719299ea76a95f20a35a7900875a8152e293a" #COMPMID-2336: Fix validation for quantized NEDepthwiseConvolutionLayer
+
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 878602391c..455e675fef 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -263,27 +263,6 @@ void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
}
//---------------------------------------------------------------
-/// Validates that the output tensor's quantization scale is greater than the product
-/// of the two input tensors' quantization scales. This is a requirement of the implementation of
-/// the quantized multiplication.
-void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
- const TensorInfo& outputTensorInfo, std::string const& descName,
- const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
-{
- if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
- {
- if (outputTensorInfo.GetQuantizationScale() <=
- inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
- {
- std::stringstream msg;
- msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
- "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
- throw InvalidArgumentException(msg.str());
- }
- }
-}
-
-//---------------------------------------------------------------
void ValidateDataTypes(const TensorInfo& info,
const std::vector<armnn::DataType>& supportedTypes,
std::string const& descName)
@@ -705,8 +684,6 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
}
- ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
- workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
// Check the supported data types
std::vector<DataType> supportedTypes =
@@ -892,8 +869,6 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
}
- ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
- workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
}
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
@@ -945,9 +920,6 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
"DepthwiseConvolution2dQueueDescriptor", "bias");
}
- ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
- workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
-
// Check the supported data types
std::vector<DataType> supportedTypes = {
DataType::Float32,
@@ -2256,13 +2228,6 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
descriptorName);
}
- ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0],
- m_Weight->GetTensorInfo(),
- workloadInfo.m_OutputTensorInfos[0],
- descriptorName,
- "input",
- "weights",
- "output");
}
} //namespace armnn