diff options
author | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2019-10-21 14:58:26 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-10-22 07:57:51 +0000 |
commit | 2c52646b446c19f330438efcae8e306eddba0051 (patch) | |
tree | 5dc6f606c020fb32286d1eee20255c88e07eddd9 /src/armnnTfLiteParser/TfLiteParser.cpp | |
parent | 15db745d59a796fd05e3bb5a8b735f25710bdf22 (diff) | |
download | armnn-2c52646b446c19f330438efcae8e306eddba0051.tar.gz |
Fix Unpack in TfLiteParser missing quantization parameters
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I026e4880013fa4ed83b6c4643dda7e4d100014a5
Diffstat (limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')
-rw-r--r-- | src/armnnTfLiteParser/TfLiteParser.cpp | 27 |
1 file changed, 7 insertions, 20 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index da81c0a628..24fe7dad0b 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -2168,35 +2168,22 @@ void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex) auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]}); - // Reshape to remove unpacked dimension - unsigned int reshapedNumDimensions = inputDimSize - 1; - std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions); - - unsigned int reshapeIndex = 0; - for (unsigned int i = 0; i < inputDimSize; ++i) - { - if (i == unpackAxis) - { - continue; - } - reshapedDimensions[reshapeIndex++] = unpackDimSizes[i]; - } - // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter. for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k) { - armnn::TensorInfo reshapedTensorInfo = inputTensorInfo; - reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() }); - + armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]); std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName()); armnn::ReshapeDescriptor desc; - desc.m_TargetShape = reshapedTensorInfo.GetShape(); + desc.m_TargetShape = outputTensorInfo.GetShape(); armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str()); - layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType())); + layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, + outputTensorInfo.GetDataType(), + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0)); - 
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo); + reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]); armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0)); |