author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2019-10-21 14:58:26 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>         2019-10-22 07:57:51 +0000
commit     2c52646b446c19f330438efcae8e306eddba0051 (patch)
tree       5dc6f606c020fb32286d1eee20255c88e07eddd9
parent     15db745d59a796fd05e3bb5a8b735f25710bdf22 (diff)
download   armnn-2c52646b446c19f330438efcae8e306eddba0051.tar.gz
Fix missing quantization parameters for Unpack in TfLiteParser
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I026e4880013fa4ed83b6c4643dda7e4d100014a5
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp   27
-rw-r--r--  src/armnnTfLiteParser/test/Unpack.cpp    64
2 files changed, 60 insertions(+), 31 deletions(-)
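In short: the parser previously rebuilt the shape of each unpacked output from the input tensor's dimensions and reused the input TensorInfo, so the quantization scale and zero point of the outputs were lost. The fix reads each output's TensorInfo from the model via ToTensorInfo(outputs[k]) and propagates its data type, quantization scale and quantization offset to both the Splitter output slot and the trailing Reshape layer. Below is a minimal sketch, not taken from this commit, of how an armnn::TensorInfo carries those quantization parameters; it assumes an Arm NN build contemporary with this change, where the 8-bit asymmetric quantized type is DataType::QuantisedAsymm8.

// Minimal sketch (not part of this commit): a TensorInfo constructed with explicit
// quantization parameters, which is what the fixed Unpack path now forwards from the
// model's output tensors instead of defaulting to scale 1.0 / offset 0.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>

int main()
{
    // Shape of one unpacked slice, e.g. [1, 6] from a [4, 1, 6] input split on axis 0.
    armnn::TensorShape splitOutShape({ 1, 6 });

    // Data type, scale and zero point taken from the model's output tensor.
    armnn::TensorInfo outputTensorInfo(splitOutShape,
                                       armnn::DataType::QuantisedAsymm8,
                                       0.1f, // quantization scale
                                       0);   // quantization offset (zero point)

    std::cout << "scale="   << outputTensorInfo.GetQuantizationScale()
              << " offset=" << outputTensorInfo.GetQuantizationOffset() << std::endl;
    return 0;
}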
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index da81c0a628..24fe7dad0b 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -2168,35 +2168,22 @@ void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
- // Reshape to remove unpacked dimension
- unsigned int reshapedNumDimensions = inputDimSize - 1;
- std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
-
- unsigned int reshapeIndex = 0;
- for (unsigned int i = 0; i < inputDimSize; ++i)
- {
- if (i == unpackAxis)
- {
- continue;
- }
- reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
- }
-
// Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
{
- armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
- reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
-
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
armnn::ReshapeDescriptor desc;
- desc.m_TargetShape = reshapedTensorInfo.GetShape();
+ desc.m_TargetShape = outputTensorInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
- layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
+ layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
+ outputTensorInfo.GetDataType(),
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset()));
layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
- reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
index 6b3c57b0bd..04fd50dc39 100644
--- a/src/armnnTfLiteParser/test/Unpack.cpp
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -14,11 +14,14 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
struct UnpackFixture : public ParserFlatbuffersFixture
{
- explicit UnpackFixture(const std::string & inputShape,
+ explicit UnpackFixture(const std::string& inputShape,
const unsigned int numberOfOutputs,
- const std::string & outputShape,
- const std::string & axis,
- const std::string & num)
+ const std::string& outputShape,
+ const std::string& axis,
+ const std::string& num,
+ const std::string& dataType,
+ const std::string& outputScale,
+ const std::string& outputOffset)
{
// As input index is 0, output indexes start at 1
std::string outputIndexes = "1";
@@ -34,7 +37,7 @@ struct UnpackFixture : public ParserFlatbuffersFixture
"tensors": [
{
"shape": )" + inputShape + R"(,
- "type": "FLOAT32",
+ "type": )" + dataType + R"(,
"buffer": 0,
"name": "inputTensor",
"quantization": {
@@ -51,14 +54,14 @@ struct UnpackFixture : public ParserFlatbuffersFixture
m_JsonString += R"(
{
"shape": )" + outputShape + R"( ,
- "type": "FLOAT32",
+ "type": )" + dataType + R"(,
"buffer": )" + std::to_string(i + 1) + R"(,
"name": "outputTensor)" + std::to_string(i + 1) + R"(",
"quantization": {
"min": [ 0.0 ],
"max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "scale": [ )" + outputScale + R"( ],
+ "zero_point": [ )" + outputOffset + R"( ],
}
},)";
}
@@ -99,7 +102,12 @@ struct UnpackFixture : public ParserFlatbuffersFixture
struct DefaultUnpackAxisZeroFixture : UnpackFixture
{
- DefaultUnpackAxisZeroFixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "") {}
+ DefaultUnpackAxisZeroFixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "", "FLOAT32", "1.0", "0") {}
+};
+
+struct DefaultUnpackAxisZeroUint8Fixture : UnpackFixture
+{
+ DefaultUnpackAxisZeroUint8Fixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "", "UINT8", "0.1", "0") {}
};
BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxisZeroFixture)
@@ -111,14 +119,33 @@ BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxi
13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } } },
{ {"outputTensor1", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }},
- {"outputTensor2", { 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f }},
+ {"outputTensor2", { 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f }},
{"outputTensor3", { 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f }},
{"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
}
+BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecifiedUint8, DefaultUnpackAxisZeroUint8Fixture)
+{
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { {"inputTensor", { 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24 } } },
+ { {"outputTensor1", { 10, 20, 30, 40, 50, 60 }},
+ {"outputTensor2", { 70, 80, 90, 100, 110, 120 }},
+ {"outputTensor3", { 130, 140, 150, 160, 170, 180 }},
+ {"outputTensor4", { 190, 200, 210, 220, 230, 240 }} });
+}
+
struct DefaultUnpackLastAxisFixture : UnpackFixture
{
- DefaultUnpackLastAxisFixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6") {}
+ DefaultUnpackLastAxisFixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6", "FLOAT32", "1.0", "0") {}
+};
+
+struct DefaultUnpackLastAxisUint8Fixture : UnpackFixture
+{
+ DefaultUnpackLastAxisUint8Fixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6", "UINT8", "0.1", "0") {}
};
BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
@@ -137,4 +164,19 @@ BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
{"outputTensor6", { 6.0f, 12.0f, 18.0f, 24.0f }} });
}
+BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fixture)
+{
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor", { 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24 }}},
+ {{"outputTensor1", { 10, 70, 130, 190 }},
+ {"outputTensor2", { 20, 80, 140, 200 }},
+ {"outputTensor3", { 30, 90, 150, 210 }},
+ {"outputTensor4", { 40, 100, 160, 220 }},
+ {"outputTensor5", { 50, 110, 170, 230 }},
+ {"outputTensor6", { 60, 120, 180, 240 }}});
+}
+
BOOST_AUTO_TEST_SUITE_END()
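A note on the expected data in the QuantisedAsymm8 test cases above: the Uint8 fixtures set the output scale to 0.1 with a zero point of 0, so the real values 1.0 to 24.0 quantize to 10 to 240 using q = round(real / scale) + zeroPoint. A minimal standalone sketch of that arithmetic, independent of the Arm NN API:

// Standalone sketch of asymmetric quantization, q = round(real / scale) + zeroPoint,
// matching the expected values in the Uint8 test cases (scale 0.1, zero point 0).
#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
    const float scale = 0.1f;
    const std::int32_t zeroPoint = 0;

    for (float real = 1.0f; real <= 6.0f; real += 1.0f)
    {
        auto q = static_cast<std::uint8_t>(std::lround(real / scale) + zeroPoint);
        std::cout << real << " -> " << static_cast<int>(q) << std::endl; // 1 -> 10, 2 -> 20, ...
    }
    return 0;
}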