author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-11-27 16:57:56 +0000
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-12-16 17:38:23 +0000
commit    8719d2247fab4ba0e3936c86e3043da3da1b573c (patch)
tree      70fa1c36560d9484dd1e54f69df2e544394e4d94 /src
parent    ce55278f775fced323907ada6eb70398ad9e3fa2 (diff)
download  armnn-8719d2247fab4ba0e3936c86e3043da3da1b573c.tar.gz
IVGCVSW-5595 Fix incorrect padding value for asymmetric quantized type
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I85f0c30757043f8c27c78d607f0f9dbbdd35b9fb
Diffstat (limited to 'src')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp                       |   6
-rw-r--r--  src/armnnTfLiteParser/test/Pad.cpp                           |  59
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp  | 109
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp  |  19
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                |   4
5 files changed, 187 insertions, 10 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index d1d45f5583..c3d56b13d3 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1845,6 +1845,8 @@ void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+
armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
@@ -1853,6 +1855,10 @@ void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
size_t step = 2;
armnn::PadDescriptor desc;
+ if (inputTensorInfo.IsQuantized())
+ {
+ desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
+ }
for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
{
desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
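
A note on the ParsePad change above: with asymmetric quantization, the real value 0.0 is represented by the zero point (the quantization offset), so leaving the pad value at a raw 0 would decode to a non-zero real value. The hunk therefore sets desc.m_PadValue to the offset for quantized inputs. A minimal standalone sketch of that relationship, using the scale/offset values from the tests in this commit (the Quantize/Dequantize helpers below are illustrative, not Arm NN API):

#include <cmath>
#include <cstdint>
#include <iostream>

// Illustrative helpers (not Arm NN API): asymmetric quantization maps
// real = scale * (quantized - zeroPoint), so real 0.0 quantizes to zeroPoint.
int32_t Quantize(float real, float scale, int32_t zeroPoint)
{
    return static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
}

float Dequantize(int32_t quantized, float scale, int32_t zeroPoint)
{
    return scale * static_cast<float>(quantized - zeroPoint);
}

int main()
{
    const float   scale     = -2.0f; // scale/offset used by the new tests in this commit
    const int32_t zeroPoint = 3;

    std::cout << Dequantize(zeroPoint, scale, zeroPoint) << "\n"; // 0: the offset decodes to real 0.0
    std::cout << Dequantize(0, scale, zeroPoint)         << "\n"; // 6: a raw 0 would not decode to 0.0
    std::cout << Quantize(0.0f, scale, zeroPoint)        << "\n"; // 3: real 0.0 encodes to the offset
    return 0;
}
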
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
index bdc8478ca2..aab1536628 100644
--- a/src/armnnTfLiteParser/test/Pad.cpp
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -14,10 +14,13 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
struct PadFixture : public ParserFlatbuffersFixture
{
- explicit PadFixture(const std::string & inputShape,
- const std::string & outputShape,
- const std::string & padListShape,
- const std::string & padListData)
+ explicit PadFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& padListShape,
+ const std::string& padListData,
+ const std::string& dataType = "FLOAT32",
+ const std::string& scale = "1.0",
+ const std::string& offset = "0")
{
m_JsonString = R"(
{
@@ -27,26 +30,26 @@ struct PadFixture : public ParserFlatbuffersFixture
"tensors": [
{
"shape": )" + inputShape + R"(,
- "type": "FLOAT32",
+ "type": )" + dataType + R"(,
"buffer": 0,
"name": "inputTensor",
"quantization": {
"min": [ 0.0 ],
"max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
}
},
{
"shape": )" + outputShape + R"(,
- "type": "FLOAT32",
+ "type": )" + dataType + R"(,
"buffer": 1,
"name": "outputTensor",
"quantization": {
"min": [ 0.0 ],
"max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
}
},
{
@@ -101,4 +104,40 @@ BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
}
+struct Uint8PadFixture : public PadFixture
+{
+ Uint8PadFixture() : PadFixture("[ 2, 3 ]", "[ 4, 7 ]", "[ 2, 2 ]",
+ "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]",
+ "UINT8", "-2.0", "3") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePadUint8, Uint8PadFixture)
+{
+ RunTest<2, armnn::DataType::QAsymmU8>
+ (0,
+ {{ "inputTensor", { 1, 2, 3, 4, 5, 6 }}},
+ {{ "outputTensor", { 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 1, 2, 3, 3, 3,
+ 3, 3, 4, 5, 6, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3 }}});
+}
+
+struct Int8PadFixture : public PadFixture
+{
+ Int8PadFixture() : PadFixture("[ 2, 3 ]", "[ 4, 7 ]", "[ 2, 2 ]",
+ "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]",
+ "INT8", "-2.0", "3") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePadInt8, Int8PadFixture)
+{
+ RunTest<2, armnn::DataType::QAsymmS8>
+ (0,
+ {{ "inputTensor", { 1, -2, 3, 4, 5, -6 }}},
+ {{ "outputTensor", { 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 1, -2, 3, 3, 3,
+ 3, 3, 4, 5, -6, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3 }}});
+}
+
BOOST_AUTO_TEST_SUITE_END()
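
As a cross-check of the expected data in ParsePadUint8/ParsePadInt8 above: the pad list [[1,1], [2,2]] grows the 2x3 input to 4x7, and every padded position holds the zero point (3), which decodes to real 0.0 with the fixture's scale and offset. A minimal standalone sketch that reproduces the ParsePadInt8 expected output (assumes row-major layout; not Arm NN code):

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<int8_t> input = { 1, -2, 3, 4, 5, -6 };   // 2 x 3 input from ParsePadInt8
    const unsigned int inH = 2, inW = 3;
    const unsigned int padTop = 1, padBottom = 1, padLeft = 2, padRight = 2;
    const int8_t zeroPoint = 3;                                  // fixture offset

    const unsigned int outH = inH + padTop + padBottom;          // 4
    const unsigned int outW = inW + padLeft + padRight;          // 7

    std::vector<int8_t> output(outH * outW, zeroPoint);          // padded area filled with the zero point
    for (unsigned int y = 0; y < inH; ++y)
    {
        for (unsigned int x = 0; x < inW; ++x)
        {
            output[(y + padTop) * outW + (x + padLeft)] = input[y * inW + x];
        }
    }

    for (unsigned int y = 0; y < outH; ++y)
    {
        for (unsigned int x = 0; x < outW; ++x)
        {
            std::cout << static_cast<int>(output[y * outW + x]) << (x + 1 == outW ? '\n' : ' ');
        }
    }
    return 0;
}
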
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 5511f86e17..086f8757dd 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -417,6 +417,79 @@ LayerTestResult<T, 4> Pad4dTestCommon(
return result;
}
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> PadQAsymmTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue)
+{
+ IgnoreUnused(memoryManager);
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 7, 7 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues =
+ {
+ // Height (3) x Width (3)
+ 4, 8, 6,
+ 7, 4, 4,
+ 3, 2, 4
+ };
+
+ T p = static_cast<T>(customPaddingValue);
+ std::vector<T> expectedOutputValues =
+ {
+ p, p, p, p, p, p, p,
+ p, p, p, p, p, p, p,
+ p, p, 4, 8, 6, p, p,
+ p, p, 7, 4, 4, p, p,
+ p, p, 3, 2, 4, p, p,
+ p, p, p, p, p, p, p,
+ p, p, p, p, p, p, p
+ };
+
+ auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
+
+ LayerTestResult<T, 2> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+
+ armnn::PadQueueDescriptor descriptor;
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ descriptor.m_Parameters.m_PadList = padList;
+ descriptor.m_Parameters.m_PadValue = customPaddingValue;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+ return result;
+}
+
//
// Explicit template specializations
//
@@ -446,6 +519,24 @@ Pad4dTestCommon<armnn::DataType::QSymmS16>(
float qScale,
int32_t qOffset);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+PadQAsymmTestCommon<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue);
+
//
// Implementation functions
//
@@ -582,3 +673,21 @@ LayerTestResult<int8_t, 4> PadInt84dTest(
{
return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}
+
+LayerTestResult<int8_t, 2> PadInt8AsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+ workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 2);
+}
+
+LayerTestResult<int8_t, 2> PadInt8CustomPaddingAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+ workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 3, 1.0f);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
index 8d603862b7..4c30c427cb 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
@@ -39,6 +39,15 @@ LayerTestResult<T, 4> Pad4dTestCommon(
float qScale,
int32_t qOffset);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadQAsymmTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue = 0.0f);
+
LayerTestResult<uint8_t, 2> PadUint82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -117,4 +126,14 @@ LayerTestResult<int8_t, 3> PadInt83dTest(
LayerTestResult<int8_t, 4> PadInt84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 2> PadInt82dAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index be95ad7daf..502e0cb84d 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1309,6 +1309,8 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82d, PadUint82dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint84d, PadUint84dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint8Asymm, PadQAsymmTestCommon<DataType::QAsymmU8>, -2.0f, 3, 0.0f)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint8CustomPaddingAsymm, PadQAsymmTestCommon<DataType::QAsymmU8>, -2.0f, 3, 2.0f)
ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
@@ -1319,6 +1321,8 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt82d, PadInt82dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt82dCustomPadding, PadInt82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt83d, PadInt83dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt84d, PadInt84dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 0.0f)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8CustomPadding, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 2.0f)
// Constant
ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)