aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2019-02-25 17:26:05 +0000
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2019-02-26 17:41:15 +0000
commit4628d05455dfc179f0437913185e76888115a98a (patch)
treea8eac68ee5aee88a7071ac6f13af7932b98caa87 /src
parent452869973b9a45c9c44820d16f92f7dfc96e9aef (diff)
downloadarmnn-4628d05455dfc179f0437913185e76888115a98a.tar.gz
IVGCVSW-2560 Verify Inference test for TensorFlow Lite MobileNet SSD
* Assign output shape of MobileNet SSD to ArmNN network * Add m_OverridenOutputShapes to TfLiteParser to set shape in GetNetworkOutputBindingInfo * Use input quantization instead of output quantization params * Correct data and datatype in Inference test Change-Id: I01ac2e07ed08e8928ba0df33a4847399e1dd8394 Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com> Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Diffstat (limited to 'src')
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.cpp32
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.hpp4
-rw-r--r--src/armnnTfLiteParser/test/DetectionPostProcess.cpp4
-rw-r--r--src/backends/reference/workloads/DetectionPostProcess.cpp2
4 files changed, 28 insertions, 14 deletions
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index cd0e9214c2..31e808fd6e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -295,7 +295,7 @@ void CalcPadding(uint32_t inputSize,
}
}
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
armnn::DataType type;
CHECK_TENSOR_PTR(tensorPtr);
@@ -345,17 +345,21 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
}
}
- auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
-
// two statements (on purpose) for easier debugging:
- armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
- dimensions.data(),
+ armnn::TensorInfo result(static_cast<unsigned int>(shapes.size()),
+ shapes.data(),
type,
quantizationScale,
quantizationOffset);
return result;
}
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
+{
+ auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
+ return ToTensorInfo(tensorPtr, dimensions);
+}
+
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
@@ -1796,10 +1800,17 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
BOOST_ASSERT(layer != nullptr);
- // Register outputs
+ // The model does not specify the output shapes.
+ // The output shapes are calculated from the max_detection and max_classes_per_detection.
+ unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
+ m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
+ m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
+ m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
+ m_OverridenOutputShapes.push_back({ 1 });
+
for (unsigned int i = 0 ; i < outputs.size() ; ++i)
{
- armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i]);
+ armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
}
@@ -2232,12 +2243,15 @@ BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
{
CHECK_SUBGRAPH(m_Model, subgraphId);
auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
- for (auto const & output : outputs)
+ for (unsigned int i = 0; i < outputs.size(); ++i)
{
+ auto const output = outputs[i];
if (output.second->name == name)
{
auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
- return std::make_pair(bindingId, ToTensorInfo(output.second));
+ std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
+ m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
+ return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
}
}
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 3fe4809aa2..2895487214 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -193,6 +193,10 @@ private:
/// Connections for tensors in each subgraph
/// The first index is the subgraph ID, the second index is the tensor ID
std::vector<TensorConnections> m_SubgraphConnections;
+
+ /// This is used in case that the model does not specify the output.
+ /// The shape can be calculated from the options.
+ std::vector<std::vector<unsigned int>> m_OverridenOutputShapes;
};
}
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 3c602937ce..3002885016 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -76,28 +76,24 @@ struct DetectionPostProcessFixture : ParserFlatbuffersFixture
}
},
{
- "shape": [1, 3, 4],
"type": "FLOAT32",
"buffer": 3,
"name": "detection_boxes",
"quantization": {}
},
{
- "shape": [1, 3],
"type": "FLOAT32",
"buffer": 4,
"name": "detection_classes",
"quantization": {}
},
{
- "shape": [1, 3],
"type": "FLOAT32",
"buffer": 5,
"name": "detection_scores",
"quantization": {}
},
{
- "shape": [1],
"type": "FLOAT32",
"buffer": 6,
"name": "num_detections",
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 2eb35f5ffa..6868180b0b 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -197,7 +197,7 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
}
std::vector<unsigned int> selectedIndices = NonMaxSuppression(numBoxes, boxCorners, classScores,
desc.m_NmsScoreThreshold,
- desc.m_MaxClassesPerDetection,
+ desc.m_DetectionsPerClass,
desc.m_NmsIouThreshold);
for (unsigned int i = 0; i < selectedIndices.size(); ++i)