about summary refs log tree commit diff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-11-05 18:00:21 +0000
committerFrancis Murtagh <francis.murtagh@arm.com>2019-11-06 12:10:02 +0000
commit5edc8816118fcddb2681379db04c978041ce8b46 (patch)
tree22e4382138e9963d0ed3dacefda4fb142877e1fc /src/backends/backendsCommon
parentec33a91ec1557b78b2d01975ec4c5eaf24aa058c (diff)
downloadarmnn-5edc8816118fcddb2681379db04c978041ce8b46.tar.gz
IVGCVSW-3837 Add support for per-axis quantization to reference Convolution2d workload
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I0ac08ba4864d48e6f64c4ac645dad8ea850be112
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r-- src/backends/backendsCommon/WorkloadData.cpp                          | 109
-rw-r--r-- src/backends/backendsCommon/test/WorkloadDataValidation.cpp           |  11
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp        |  93
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp        |   5
4 files changed, 210 insertions, 8 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index e1a369af7c..201cc7d1ec 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -338,6 +338,102 @@ void ValidateTensorNumElementsMatch(const TensorInfo& first,
}
}
+void ValidateWeightDataType(const TensorInfo& inputInfo,
+ const TensorInfo& weightInfo,
+ const std::string& descName)
+{
+ const DataType inputType = inputInfo.GetDataType();
+ if (inputType == DataType::QuantisedAsymm8)
+ {
+ const std::vector<DataType> validTypes =
+ {
+ DataType::QuantisedAsymm8,
+ DataType::QuantizedSymm8PerAxis
+ };
+
+ ValidateDataTypes(weightInfo, validTypes, descName);
+ }
+ else
+ {
+ ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
+ }
+}
+
+void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
+ const std::string& descName,
+ const std::string& tensorName)
+{
+ const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
+ if (!quantizationDim.has_value())
+ {
+ throw InvalidArgumentException(boost::str(
+ boost::format("%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
+ % descName % tensorName));
+ }
+
+ if (quantizationDim.value() != 0)
+ {
+ throw InvalidArgumentException(boost::str(
+ boost::format("%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, "
+ "but got: %3%") % descName % tensorName % quantizationDim.value()));
+ }
+}
+
+void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
+ const std::string& descName,
+ const std::string& tensorName)
+{
+ int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
+ if (quantizationOffset != 0)
+ {
+ throw InvalidArgumentException(boost::str(
+ boost::format("%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, "
+ "but got: %3%") % descName % tensorName % quantizationOffset));
+ }
+}
+
+void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ const TensorInfo& weightInfo,
+ const Optional<TensorInfo>& optionalBiasInfo,
+ const std::string& descName)
+{
+ if (weightInfo.HasPerAxisQuantization())
+ {
+ const DataType inputDataType = inputInfo.GetDataType();
+ const DataType outputDataType = outputInfo.GetDataType();
+
+ const bool canHavePerAxisQuantization =
+ inputDataType == DataType::QuantisedAsymm8 && inputDataType == outputDataType;
+
+ if (!canHavePerAxisQuantization)
+ {
+ throw InvalidArgumentException(boost::str(
+ boost::format("%1%: Per-axis quantization parameters set on tensor %2%, "
+ "but data type does not support per-axis quantization.") % descName % "weight"));
+ }
+
+ ValidateTensorDataType(weightInfo, DataType::QuantizedSymm8PerAxis, descName, "weight");
+ ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
+ ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");
+
+ if (optionalBiasInfo.has_value())
+ {
+ const TensorInfo& biasInfo = optionalBiasInfo.value();
+ if (!biasInfo.HasPerAxisQuantization())
+ {
+ throw InvalidArgumentException(boost::str(
+ boost::format("%1%: Per-axis quantization parameters not set on bias tensor, despite being set on "
+ "weight tensor.") % descName));
+ }
+
+ ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
+ ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
+ ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
+ }
+ }
+}
+
} // anonymous namespace
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
@@ -1040,19 +1136,26 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
- ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
+ ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
+ Optional<TensorInfo> optionalBiasTensorInfo;
if (m_Parameters.m_BiasEnabled)
{
ValidatePointer(m_Bias, descriptorName, "bias");
- const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
- ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
+ optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
+ const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
}
+ ValidatePerAxisQuantization(inputTensorInfo,
+ outputTensorInfo,
+ weightTensorInfo,
+ optionalBiasTensorInfo,
+ descriptorName);
+
std::vector<DataType> supportedTypes =
{
DataType::Float32,
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 9773914220..70d00b3a91 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -605,15 +605,16 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
const TensorShape biasShape { cOutput };
- constexpr DataType dataType = DataType::QuantisedAsymm8;
- constexpr DataType biasType = DataType::Signed32;
+ constexpr DataType inputType = DataType::QuantisedAsymm8;
+ constexpr DataType weightType = DataType::QuantizedSymm8PerAxis;
+ constexpr DataType biasType = DataType::Signed32;
constexpr float perTensorScale = 1.5f;
- const TensorInfo inputInfo (inputShape, dataType, perTensorScale);
- const TensorInfo outputInfo(outputShape, dataType, perTensorScale);
+ const TensorInfo inputInfo (inputShape, inputType, perTensorScale);
+ const TensorInfo outputInfo(outputShape, inputType, perTensorScale);
const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
- const TensorInfo weightInfo(weightShape, dataType, weightPerAxisScales, 0);
+ const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);
Convolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters.m_BiasEnabled = true;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 198904e71d..5fac09f5b3 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -14,6 +14,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -3035,6 +3036,98 @@ LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}
+LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ using namespace armnn;
+
+ const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType kernelType = DataType::QuantizedSymm8PerAxis;
+ const DataType biasType = DataType::Signed32;
+
+ TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
+ TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);
+
+ const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
+ constexpr unsigned int quantDimension = 0;
+
+ TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
+
+ const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
+ TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
+
+ std::vector<uint8_t> inputData =
+ {
+ 138, 108, 138, 108, 138, 108
+ };
+
+ std::vector<int8_t> kernelData =
+ {
+ 1, 2, 1, 2, 1, 2
+ };
+
+ std::vector<int32_t> biasData =
+ {
+ 4, 4, 4
+ };
+
+ std::vector<uint8_t> expectedOutputData =
+ {
+ 121, 118, 115, 121, 118, 115, 121, 118, 115
+ };
+
+ if (layout == DataLayout::NCHW)
+ {
+ PermuteTensorNhwcToNchw(inputInfo, inputData);
+ PermuteTensorNhwcToNchw(kernelInfo, kernelData);
+ PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+ }
+
+ Convolution2dDescriptor descriptor;
+ descriptor.m_StrideX = 1;
+ descriptor.m_StrideY = 1;
+ descriptor.m_PadLeft = 0;
+ descriptor.m_PadRight = 0;
+ descriptor.m_PadTop = 0;
+ descriptor.m_PadBottom = 0;
+ descriptor.m_BiasEnabled = true;
+ descriptor.m_DataLayout = layout;
+
+ std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
+
+ WorkloadInfo workloadInfo;
+ ScopedCpuTensorHandle weightTensor(kernelInfo);
+ ScopedCpuTensorHandle biasTensor(biasInfo);
+
+ AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
+ AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
+
+ Convolution2dQueueDescriptor queueDescriptor;
+ queueDescriptor.m_Parameters = descriptor;
+ queueDescriptor.m_Weight = &weightTensor;
+ queueDescriptor.m_Bias = &biasTensor;
+
+ AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
+ AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
+
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateConvolution2d(queueDescriptor, workloadInfo);
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+
+ ExecuteWorkload(*workload, memoryManager);
+
+ LayerTestResult<uint8_t, 4> ret(outputInfo);
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
+
+ return ret;
+}
+
LayerTestResult<float,4> CompareConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp
index f5ff586f02..3aac975b3b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp
@@ -111,6 +111,11 @@ LayerTestResult<float, 4> CompareConvolution2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory);
+LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout);
+
//
// DepthwiseConvolution2d
//