From 4628d05455dfc179f0437913185e76888115a98a Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 25 Feb 2019 17:26:05 +0000
Subject: IVGCVSW-2560 Verify Inference test for TensorFlow Lite MobileNet SSD

 * Assign output shape of MobileNet SSD to ArmNN network
 * Add m_OverridenOutputShapes to TfLiteParser to set shape in GetNetworkOutputBindingInfo
 * Use input quantization instead of output quantization params
 * Correct data and datatype in Inference test

Change-Id: I01ac2e07ed08e8928ba0df33a4847399e1dd8394
Signed-off-by: Narumol Prangnawarat
Signed-off-by: Aron Virginas-Tar
---
 src/armnnTfLiteParser/TfLiteParser.cpp | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

(limited to 'src/armnnTfLiteParser/TfLiteParser.cpp')

diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index cd0e9214c2..31e808fd6e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -295,7 +295,7 @@ void CalcPadding(uint32_t inputSize,
     }
 }
 
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
 {
     armnn::DataType type;
     CHECK_TENSOR_PTR(tensorPtr);
@@ -345,17 +345,21 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
         }
     }
 
-    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
-
     // two statements (on purpose) for easier debugging:
-    armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
-                             dimensions.data(),
+    armnn::TensorInfo result(static_cast<unsigned int>(shapes.size()),
+                             shapes.data(),
                              type,
                              quantizationScale,
                              quantizationOffset);
     return result;
 }
 
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
+{
+    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
+    return ToTensorInfo(tensorPtr, dimensions);
+}
+
 template<typename T>
 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
 CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
@@ -1796,10 +1800,17 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
 
     BOOST_ASSERT(layer != nullptr);
 
-    // Register outputs
+    // The model does not specify the output shapes.
+    // The output shapes are calculated from the max_detection and max_classes_per_detection.
+    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
+    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
+    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
+    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
+    m_OverridenOutputShapes.push_back({ 1 });
+
     for (unsigned int i = 0 ; i < outputs.size() ; ++i)
     {
-        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i]);
+        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
         layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
     }
 
@@ -2232,12 +2243,15 @@ BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
 {
     CHECK_SUBGRAPH(m_Model, subgraphId);
     auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
-    for (auto const & output : outputs)
+    for (unsigned int i = 0; i < outputs.size(); ++i)
     {
+        auto const output = outputs[i];
         if (output.second->name == name)
         {
             auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
-            return std::make_pair(bindingId, ToTensorInfo(output.second));
+            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
+                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
+            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
         }
     }
 
-- 
cgit v1.2.1
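
For context, the sketch below shows how the overridden DetectionPostProcess output shapes surface through the public parser API once this patch is applied. It is a minimal, hypothetical usage example and not part of the change itself: the model file name, the four TFLite_Detection_PostProcess output tensor names, and the use of subgraph 0 are assumptions about a typical TensorFlow Lite MobileNet SSD model.

// Minimal usage sketch (assumptions noted above): query the post-process outputs
// and print the shapes that GetNetworkOutputBindingInfo now reports from
// m_OverridenOutputShapes instead of the shapes stored in the .tflite file.
#include <armnnTfLiteParser/ITfLiteParser.hpp>

#include <iostream>
#include <string>
#include <vector>

int main()
{
    using namespace armnnTfLiteParser;

    ITfLiteParserPtr parser = ITfLiteParser::Create();

    // Hypothetical model path; any TensorFlow Lite SSD model whose graph ends
    // in a TFLite_Detection_PostProcess custom operator.
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("ssd_mobilenet_v1.tflite");

    // Conventional output names of the post-process operator (assumed here).
    const std::vector<std::string> outputNames =
    {
        "TFLite_Detection_PostProcess",     // detection boxes,   overridden to { 1, numDetectedBox, 4 }
        "TFLite_Detection_PostProcess:1",   // detection classes, overridden to { 1, numDetectedBox }
        "TFLite_Detection_PostProcess:2",   // detection scores,  overridden to { 1, numDetectedBox }
        "TFLite_Detection_PostProcess:3"    // number of detections, overridden to { 1 }
    };

    for (const std::string& outputName : outputNames)
    {
        // BindingPointInfo pairs a binding id with an armnn::TensorInfo; the
        // TensorInfo carries the overridden output shape.
        BindingPointInfo info = parser->GetNetworkOutputBindingInfo(0, outputName);
        std::cout << outputName << ": "
                  << info.second.GetNumDimensions() << " dimensions, "
                  << info.second.GetNumElements() << " elements" << std::endl;
    }

    return 0;
}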