author     keidav01 <keith.davis@arm.com>                2019-02-21 10:07:37 +0000
committer  Aron Virginas-Tar <aron.virginas-tar@arm.com> 2019-02-21 11:08:38 +0000
commit     1b3e2ead82db933ea8e97063cea132cb042b079a (patch)
tree       ed6df4aafb7a813cbf1c77139b24536ada3ce84e
parent     19f74b440ab61a8f39aefe68bd943f7fafc4adc6 (diff)
download   armnn-1b3e2ead82db933ea8e97063cea132cb042b079a.tar.gz
IVGCVSW-2429 Add Detection PostProcess Parser to TensorFlow Lite Parser
* Added parser function in TFLiteParser
* Removed custom options gating
* Added unit test
* Removed template usage in VerifyTensorInfo for DeserializeParser

Change-Id: If198654ed70060855a05f8aaed010293405bd103
Signed-off-by: keidav01 <keith.davis@arm.com>
-rw-r--r--  CMakeLists.txt                                                    |   1
-rw-r--r--  include/armnn/TypesUtils.hpp                                      |   9
-rw-r--r--  src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp  |   6
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp                            |  84
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp                            |   1
-rw-r--r--  src/armnnTfLiteParser/test/DetectionPostProcess.cpp               | 294
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp           | 120
7 files changed, 487 insertions(+), 28 deletions(-)
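
In practical terms, the patch removes the blanket rejection of operators that carry custom_options and instead dispatches BuiltinOperator_CUSTOM to a dedicated parse function. A minimal client-side sketch of the flow this enables, using the public ITfLiteParser factory API; the model file name is hypothetical:

#include <armnn/INetwork.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    // Hypothetical model file containing the custom TFLite_Detection_PostProcess op.
    const char* modelPath = "ssd_model.tflite";

    armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();

    // Before this patch, any operator with non-empty custom_options raised a
    // ParseException; with it, the custom op is parsed into a
    // DetectionPostProcess layer like any builtin operator.
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath);
    return network ? 0 : 1;
}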
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 813ac2c318..ebc59225a1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -546,6 +546,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Constant.cpp
src/armnnTfLiteParser/test/Conv2D.cpp
src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+ src/armnnTfLiteParser/test/DetectionPostProcess.cpp
src/armnnTfLiteParser/test/FullyConnected.cpp
src/armnnTfLiteParser/test/MaxPool2D.cpp
src/armnnTfLiteParser/test/Mean.cpp
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index bb75b18c32..c65eefc510 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -219,15 +219,14 @@ inline float Dequantize(QuantizedType value, float scale, int32_t offset)
return dequantized;
}
-template <armnn::DataType DataType>
-void VerifyTensorInfoDataType(const armnn::TensorInfo & info)
+inline void VerifyTensorInfoDataType(const armnn::TensorInfo & info, armnn::DataType dataType)
{
- if (info.GetDataType() != DataType)
+ if (info.GetDataType() != dataType)
{
std::stringstream ss;
ss << "Unexpected datatype:" << armnn::GetDataTypeName(info.GetDataType())
- << " for tensor:" << info.GetShape()
- << ". The type expected to be: " << armnn::GetDataTypeName(DataType);
+ << " for tensor:" << info.GetShape()
+ << ". The type expected to be: " << armnn::GetDataTypeName(dataType);
throw armnn::Exception(ss.str());
}
}
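
The refactor above moves the expected data type from a template parameter to a runtime argument, which is what lets the RunTest overloads further down verify inputs and outputs against two different types. A before/after sketch of the call site:

#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

void Example()
{
    armnn::TensorInfo info(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32);

    // Before: the expected type had to be a compile-time template argument.
    //     armnn::VerifyTensorInfoDataType<armnn::DataType::Float32>(info);

    // After: the expected type is an ordinary argument, so a caller can pass a
    // different value per tensor (throws armnn::Exception on mismatch).
    armnn::VerifyTensorInfoDataType(info, armnn::DataType::Float32);
}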
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 42ab2b17d6..e5416362b6 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -171,7 +171,7 @@ void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
for (auto&& it : inputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(layersId, it.first);
- armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnType);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
@@ -181,7 +181,7 @@ void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(layersId, it.first);
- armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnType);
outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
@@ -196,4 +196,4 @@ void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
auto outputExpected = MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second, it.second);
BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
}
-}
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 83f6950074..b45e5372ff 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -26,6 +26,7 @@
#include <algorithm>
#include <limits>
#include <numeric>
+#include <flatbuffers/flexbuffers.h>
using namespace armnn;
using armnn::CheckLocation;
@@ -421,6 +422,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
+ m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
@@ -540,17 +542,6 @@ INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
try
{
- if (op->custom_options.size() > 0)
- {
- throw ParseException(
- boost::str(
- boost::format("Custom options for op: %1% is not supported. "
- "It has %2% bytes of custom options. %3%") %
- op->opcode_index %
- op->custom_options.size() %
- CHECK_LOCATION().AsString()));
- }
-
auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
auto builtinCode = opCodePtr->builtin_code;
@@ -1455,6 +1446,77 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
}
+void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 4);
+
+ // Obtain custom options from flexbuffers
+ auto custom_options = operatorPtr->custom_options;
+ const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
+
+ // Obtain descriptor information from tf lite
+ DetectionPostProcessDescriptor desc;
+ desc.m_MaxDetections = m["max_detections"].AsUInt32();
+ desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
+ desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
+ desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
+ desc.m_NumClasses = m["num_classes"].AsUInt32();
+ desc.m_ScaleH = m["h_scale"].AsFloat();
+ desc.m_ScaleW = m["w_scale"].AsFloat();
+ desc.m_ScaleX = m["x_scale"].AsFloat();
+ desc.m_ScaleY = m["y_scale"].AsFloat();
+
+ if (!(m["use_regular_non_max_suppression"].IsNull()))
+ {
+ desc.m_UseRegularNms = m["use_regular_non_max_suppression"].AsBool();
+ }
+ if (!(m["detections_per_class"].IsNull()))
+ {
+ desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
+ }
+
+ if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
+ {
+ throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
+ "must be positive and less than or equal to 1.");
+ }
+
+ armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
+ auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
+ armnn::Optional<armnn::PermutationVector&>());
+
+ auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
+ IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
+ layerName.c_str());
+
+ BOOST_ASSERT(layer != nullptr);
+
+ // Register outputs
+ for (unsigned int i = 0 ; i < outputs.size() ; ++i)
+ {
+ armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i]);
+ layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
+ }
+
+ // Register the input connection slots for the layer; connections are made after all layers have been created.
+ // Only the first two inputs are relevant here; the anchors input is a const tensor and is excluded.
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
+
+ // Register the output connection slots for the layer; connections are made after all layers have been created.
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
+ outputTensorIndexes[1],
+ outputTensorIndexes[2],
+ outputTensorIndexes[3]});
+}
+
armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
unsigned int outputSlot,
tflite::ActivationFunctionType activationType)
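
The custom options blob is not a FlatBuffers table but a FlexBuffers map, which is why the parser pulls in flatbuffers/flexbuffers.h. A standalone sketch of the decoding pattern used in ParseDetectionPostProcess; ReadOptions and its argument are illustrative stand-ins for operatorPtr->custom_options:

#include <flatbuffers/flexbuffers.h>

#include <cstdint>
#include <vector>

void ReadOptions(const std::vector<uint8_t>& customOptions)
{
    // The blob decodes as a string-keyed map of scalars.
    const flexbuffers::Map& m =
        flexbuffers::GetRoot(customOptions.data(), customOptions.size()).AsMap();

    uint32_t maxDetections = m["max_detections"].AsUInt32();
    float    iouThreshold  = m["nms_iou_threshold"].AsFloat();

    // Looking up a missing key yields a null reference rather than throwing,
    // hence the IsNull() guards on the optional attributes in the parser.
    bool useRegularNms = false;
    if (!m["use_regular_non_max_suppression"].IsNull())
    {
        useRegularNms = m["use_regular_non_max_suppression"].AsBool();
    }

    (void)maxDetections; (void)iouThreshold; (void)useRegularNms;
}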
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 1ca00b9ac9..6d16b8aab7 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -95,6 +95,7 @@ private:
void ParseConcatenation(size_t subgraphIndex, size_t operatorIndex);
void ParseConv2D(size_t subgraphIndex, size_t operatorIndex);
void ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex);
+ void ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex);
void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
new file mode 100644
index 0000000000..4f748edfd7
--- /dev/null
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -0,0 +1,294 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TfLiteParser.hpp"
+
+#include <boost/test/unit_test.hpp>
+#include "test/GraphUtils.hpp"
+
+#include "ParserFlatbuffersFixture.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct DetectionPostProcessFixture : ParserFlatbuffersFixture
+{
+ explicit DetectionPostProcessFixture()
+ {
+ /*
+ The following values were used for the custom_options:
+ use_regular_non_max_suppression = true
+ max_classes_per_detection = 1
+ nms_score_threshold = 0.0
+ nms_iou_threshold = 0.5
max_detections = 3
+ num_classes = 2
+ h_scale = 5
+ w_scale = 5
+ x_scale = 10
+ y_scale = 10
+ */
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [{
+ "builtin_code": "CUSTOM",
+ "custom_code": "TFLite_Detection_PostProcess"
+ }],
+ "subgraphs": [{
+ "tensors": [{
+ "shape": [1, 6, 4],
+ "type": "UINT8",
+ "buffer": 0,
+ "name": "box_encodings",
+ "quantization": {
+ "min": [0.0],
+ "max": [255.0],
+ "scale": [1.0],
+ "zero_point": [ 1 ]
+ }
+ },
+ {
+ "shape": [1, 6, 3],
+ "type": "UINT8",
+ "buffer": 1,
+ "name": "scores",
+ "quantization": {
+ "min": [0.0],
+ "max": [255.0],
+ "scale": [0.01],
+ "zero_point": [0]
+ }
+ },
+ {
+ "shape": [6, 4],
+ "type": "UINT8",
+ "buffer": 2,
+ "name": "anchors",
+ "quantization": {
+ "min": [0.0],
+ "max": [255.0],
+ "scale": [0.5],
+ "zero_point": [0]
+ }
+ },
+ {
+ "shape": [1, 3, 4],
+ "type": "FLOAT32",
+ "buffer": 3,
+ "name": "detection_boxes",
+ "quantization": {}
+ },
+ {
+ "shape": [1, 3],
+ "type": "FLOAT32",
+ "buffer": 4,
+ "name": "detection_classes",
+ "quantization": {}
+ },
+ {
+ "shape": [1, 3],
+ "type": "FLOAT32",
+ "buffer": 5,
+ "name": "detection_scores",
+ "quantization": {}
+ },
+ {
+ "shape": [1],
+ "type": "FLOAT32",
+ "buffer": 6,
+ "name": "num_detections",
+ "quantization": {}
+ }
+ ],
+ "inputs": [0, 1, 2],
+ "outputs": [3, 4, 5, 6],
+ "operators": [{
+ "opcode_index": 0,
+ "inputs": [0, 1, 2],
+ "outputs": [3, 4, 5, 6],
+ "builtin_options_type": 0,
+ "custom_options": [
+ 109, 97, 120, 95, 100, 101, 116, 101, 99, 116, 105, 111, 110, 115, 0, 109, 97,
+ 120, 95, 99, 108, 97, 115, 115, 101, 115, 95, 112, 101, 114, 95, 100, 101, 116,
+ 101, 99, 116, 105, 111, 110, 0, 110, 109, 115, 95, 115, 99, 111, 114, 101, 95,
+ 116, 104, 114, 101, 115, 104, 111, 108, 100, 0, 110, 109, 115, 95, 105, 111, 117,
+ 95, 116, 104, 114, 101, 115, 104, 111, 108, 100, 0, 110, 117, 109, 95, 99, 108, 97,
+ 115, 115, 101, 115, 0, 104, 95, 115, 99, 97, 108, 101, 0, 119, 95, 115, 99, 97,
+ 108, 101, 0, 120, 95, 115, 99, 97, 108, 101, 0, 121, 95, 115, 99, 97, 108, 101, 0,
+ 117, 115, 101, 95, 114, 101, 103, 117, 108, 97, 114, 95, 110, 111, 110, 95, 109, 97,
+ 120, 95, 115, 117, 112, 112, 114, 101, 115, 115, 105, 111, 110, 0, 100, 101, 116,
+ 101, 99, 116, 105, 111, 110, 115, 95, 112, 101, 114, 95, 99, 108, 97, 115, 115, 0,
+ 11, 22, 87, 164, 180, 120, 141, 104, 61, 86, 79, 72, 11, 0, 0, 0, 1, 0, 0, 0, 11, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0, 160, 64, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 63, 0, 0, 0, 0, 2,
+ 0, 0, 0, 1, 0, 0, 0, 0, 0, 160, 64, 0, 0, 32, 65, 0, 0, 32, 65, 6, 14, 6, 6, 14, 14,
+ 6, 106, 14, 14, 14, 55, 38, 1
+ ],
+ "custom_options_format": "FLEXBUFFERS"
+ }]
+ }],
+ "buffers": [{},
+ {},
+ { "data": [ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 1, 21, 2, 2,
+ 1, 21, 2, 2,
+ 1, 201, 2, 2]},
+ {},
+ {},
+ {},
+ {}
+ ]
+ }
+ )";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, DetectionPostProcessFixture )
+{
+ Setup();
+
+ // Inputs
+ using UnquantizedContainer = std::vector<float>;
+ UnquantizedContainer boxEncodings =
+ {
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, -1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f
+ };
+
+ UnquantizedContainer scores =
+ {
+ 0.0f, 0.9f, 0.8f,
+ 0.0f, 0.75f, 0.72f,
+ 0.0f, 0.6f, 0.5f,
+ 0.0f, 0.93f, 0.95f,
+ 0.0f, 0.5f, 0.4f,
+ 0.0f, 0.3f, 0.2f
+ };
+
+ // Outputs
+ UnquantizedContainer detectionBoxes =
+ {
+ 0.0f, 10.0f, 1.0f, 11.0f,
+ 0.0f, 10.0f, 1.0f, 11.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f
+ };
+
+ UnquantizedContainer detectionClasses = { 1.0f, 0.0f, 0.0f };
+ UnquantizedContainer detectionScores = { 0.95f, 0.93f, 0.0f };
+
+ UnquantizedContainer numDetections = { 2.0f };
+
+ // Quantize inputs and outputs
+ using QuantizedContainer = std::vector<uint8_t>;
+ QuantizedContainer quantBoxEncodings = QuantizedVector<uint8_t>(1.0f, 1, boxEncodings);
+ QuantizedContainer quantScores = QuantizedVector<uint8_t>(0.01f, 0, scores);
+
+ std::map<std::string, QuantizedContainer> input =
+ {
+ { "box_encodings", quantBoxEncodings },
+ { "scores", quantScores }
+ };
+
+ std::map<std::string, UnquantizedContainer> output =
+ {
+ { "detection_boxes", detectionBoxes},
+ { "detection_classes", detectionClasses},
+ { "detection_scores", detectionScores},
+ { "num_detections", numDetections}
+ };
+
+ RunTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float32>(0, input, output);
+}
+
+BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, DetectionPostProcessFixture)
+{
+ /*
+ Inputs: box_encodings scores
+ \ /
+ DetectionPostProcess
+ / / \ \
+ / / \ \
+ Outputs: detection detection detection num_detections
+ boxes classes scores
+ */
+
+ ReadStringToBinary();
+
+ armnn::INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+
+ auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
+
+ auto optimizedNetwork = boost::polymorphic_downcast<armnn::OptimizedNetwork*>(optimized.get());
+ auto graph = optimizedNetwork->GetGraph();
+
+ // Check the number of layers in the graph
+ BOOST_TEST((graph.GetNumInputs() == 2));
+ BOOST_TEST((graph.GetNumOutputs() == 4));
+ BOOST_TEST((graph.GetNumLayers() == 7));
+
+ // Input layers
+ armnn::Layer* boxEncodingLayer = GetFirstLayerWithName(graph, "box_encodings");
+ BOOST_TEST((boxEncodingLayer->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(boxEncodingLayer, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(boxEncodingLayer, 1));
+
+ armnn::Layer* scoresLayer = GetFirstLayerWithName(graph, "scores");
+ BOOST_TEST((scoresLayer->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(scoresLayer, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(scoresLayer, 1));
+
+ // DetectionPostProcess layer
+ armnn::Layer* detectionPostProcessLayer = GetFirstLayerWithName(graph, "DetectionPostProcess:0:0");
+ BOOST_TEST((detectionPostProcessLayer->GetType() == armnn::LayerType::DetectionPostProcess));
+ BOOST_TEST(CheckNumberOfInputSlot(detectionPostProcessLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(detectionPostProcessLayer, 4));
+
+ // Output layers
+ armnn::Layer* detectionBoxesLayer = GetFirstLayerWithName(graph, "detection_boxes");
+ BOOST_TEST((detectionBoxesLayer->GetType() == armnn::LayerType::Output));
+ BOOST_TEST(CheckNumberOfInputSlot(detectionBoxesLayer, 1));
+ BOOST_TEST(CheckNumberOfOutputSlot(detectionBoxesLayer, 0));
+
+ armnn::Layer* detectionClassesLayer = GetFirstLayerWithName(graph, "detection_classes");
+ BOOST_TEST((detectionClassesLayer->GetType() == armnn::LayerType::Output));
+ BOOST_TEST(CheckNumberOfInputSlot(detectionClassesLayer, 1));
+ BOOST_TEST(CheckNumberOfOutputSlot(detectionClassesLayer, 0));
+
+ armnn::Layer* detectionScoresLayer = GetFirstLayerWithName(graph, "detection_scores");
+ BOOST_TEST((detectionScoresLayer->GetType() == armnn::LayerType::Output));
+ BOOST_TEST(CheckNumberOfInputSlot(detectionScoresLayer, 1));
+ BOOST_TEST(CheckNumberOfOutputSlot(detectionScoresLayer, 0));
+
+ armnn::Layer* numDetectionsLayer = GetFirstLayerWithName(graph, "num_detections");
+ BOOST_TEST((numDetectionsLayer->GetType() == armnn::LayerType::Output));
+ BOOST_TEST(CheckNumberOfInputSlot(numDetectionsLayer, 1));
+ BOOST_TEST(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
+
+ // Check the connections
+ armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QuantisedAsymm8, 1, 1);
+ armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QuantisedAsymm8,
+ 0.00999999978f, 0);
+
+ armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32, 0, 0);
+ armnn::TensorInfo detectionClassesTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
+ armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
+ armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1 }), armnn::DataType::Float32, 0, 0);
+
+ BOOST_TEST(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
+ BOOST_TEST(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
+ BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionBoxesLayer, 0, 0, detectionBoxesTensor));
+ BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionClassesLayer, 1, 0, detectionClassesTensor));
+ BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionScoresLayer, 2, 0, detectionScoresTensor));
+ BOOST_TEST(IsConnected(detectionPostProcessLayer, numDetectionsLayer, 3, 0, numDetectionsTensor));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
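
The fixture feeds the graph quantized stimulus: each float value is mapped to uint8 with the usual asymmetric scheme q = round(v / scale) + zero_point, matching the scale and zero_point declared in the tensor JSON (1.0/1 for box_encodings, 0.01/0 for scores). A self-contained sketch of what the QuantizedVector helper is assumed to do:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Assumed behaviour of the QuantizedVector<uint8_t> helper used above:
// q = round(v / scale) + offset, clamped to the uint8 range.
std::vector<uint8_t> QuantizeToUint8(float scale, int32_t offset,
                                     const std::vector<float>& values)
{
    std::vector<uint8_t> quantized;
    quantized.reserve(values.size());
    for (float v : values)
    {
        int32_t q = static_cast<int32_t>(std::round(v / scale)) + offset;
        quantized.push_back(static_cast<uint8_t>(std::min(255, std::max(0, q))));
    }
    return quantized;
}

// E.g. a score of 0.95f with scale 0.01 and offset 0 quantizes to 95.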
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 8d0ee01aa9..50e674ef2c 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -116,7 +116,7 @@ struct ParserFlatbuffersFixture
}
/// Executes the network with the given input tensor and checks the result against the given output tensor.
- /// This overload assumes the network has a single input and a single output.
+ /// This assumes the network has a single input and a single output.
template <std::size_t NumOutputDimensions,
armnn::DataType ArmnnType,
typename DataType = armnn::ResolveType<ArmnnType>>
@@ -133,6 +133,32 @@ struct ParserFlatbuffersFixture
const std::map<std::string, std::vector<DataType>>& inputData,
const std::map<std::string, std::vector<DataType>>& expectedOutputData);
+ /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes.
+ /// Executes the network with the given input tensors and checks the results against the given output tensors.
+ /// This overload supports multiple inputs and multiple outputs, identified by name, and allows the input
+ /// data type to differ from the output data type.
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType1,
+ armnn::DataType ArmnnType2,
+ typename DataType1 = armnn::ResolveType<ArmnnType1>,
+ typename DataType2 = armnn::ResolveType<ArmnnType2>>
+ void RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType1>>& inputData,
+ const std::map<std::string, std::vector<DataType2>>& expectedOutputData);
+
+
+ /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
+ /// Executes the network with the given input tensors and checks the results against the given output tensors.
+ /// This overload supports multiple inputs and multiple outputs, identified by name, allows the input data
+ /// type to differ from the output data type, and sizes each output from the expected data at runtime.
+ template<armnn::DataType ArmnnType1,
+ armnn::DataType ArmnnType2,
+ typename DataType1 = armnn::ResolveType<ArmnnType1>,
+ typename DataType2 = armnn::ResolveType<ArmnnType2>>
+ void RunTest(std::size_t subgraphId,
+ const std::map<std::string, std::vector<DataType1>>& inputData,
+ const std::map<std::string, std::vector<DataType2>>& expectedOutputData);
+
void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
tflite::TensorType tensorType, uint32_t buffer, const std::string& name,
const std::vector<float>& min, const std::vector<float>& max,
@@ -157,25 +183,47 @@ struct ParserFlatbuffersFixture
}
};
+/// Single Input, Single Output
+/// Executes the network with the given input tensor and checks the result against the given output tensor.
+/// This overload assumes the network has a single input and a single output.
template <std::size_t NumOutputDimensions,
- armnn::DataType ArmnnType,
+ armnn::DataType armnnType,
typename DataType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
const std::vector<DataType>& inputData,
const std::vector<DataType>& expectedOutputData)
{
- RunTest<NumOutputDimensions, ArmnnType>(subgraphId,
+ RunTest<NumOutputDimensions, armnnType>(subgraphId,
{ { m_SingleInputName, inputData } },
{ { m_SingleOutputName, expectedOutputData } });
}
+/// Multiple Inputs, Multiple Outputs
+/// Executes the network with the given input tensors and checks the results against the given output tensors.
+/// This overload supports multiple inputs and multiple outputs, identified by name.
template <std::size_t NumOutputDimensions,
- armnn::DataType ArmnnType,
+ armnn::DataType armnnType,
typename DataType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
const std::map<std::string, std::vector<DataType>>& inputData,
const std::map<std::string, std::vector<DataType>>& expectedOutputData)
{
+ RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
+}
+
+/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes
+/// Executes the network with the given input tensors and checks the results against the given output tensors.
+/// This overload supports multiple inputs and multiple outputs, identified by name, and allows the input
+/// data type to differ from the output data type.
+template <std::size_t NumOutputDimensions,
+ armnn::DataType armnnType1,
+ armnn::DataType armnnType2,
+ typename DataType1,
+ typename DataType2>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType1>>& inputData,
+ const std::map<std::string, std::vector<DataType2>>& expectedOutputData)
+{
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
// Setup the armnn input tensors from the given vectors.
@@ -183,18 +231,18 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : inputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType1);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
// Allocate storage for the output tensors to be written to and setup the armnn output tensors.
- std::map<std::string, boost::multi_array<DataType, NumOutputDimensions>> outputStorage;
+ std::map<std::string, boost::multi_array<DataType2, NumOutputDimensions>> outputStorage;
armnn::OutputTensors outputTensors;
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
- outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);
+ outputStorage.emplace(it.first, MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
}
@@ -205,7 +253,61 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- auto outputExpected = MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second, it.second);
+ auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second);
BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
}
}
+
+/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
+/// Executes the network with the given input tensors and checks the results against the given output tensors.
+/// This overload supports multiple inputs and multiple outputs, identified by name, allows the input data
+/// type to differ from the output data type, and sizes each output from the expected data at runtime.
+template <armnn::DataType armnnType1,
+ armnn::DataType armnnType2,
+ typename DataType1,
+ typename DataType2>
+void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
+ const std::map<std::string, std::vector<DataType1>>& inputData,
+ const std::map<std::string, std::vector<DataType2>>& expectedOutputData)
+{
+ using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+ // Setup the armnn input tensors from the given vectors.
+ armnn::InputTensors inputTensors;
+ for (auto&& it : inputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType1);
+
+ inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+ }
+
+ armnn::OutputTensors outputTensors;
+ outputTensors.reserve(expectedOutputData.size());
+ std::map<std::string, std::vector<DataType2>> outputStorage;
+ for (auto&& it : expectedOutputData)
+ {
+ BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
+ armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);
+
+ std::vector<DataType2> out(it.second.size());
+ outputStorage.emplace(it.first, out);
+ outputTensors.push_back({ bindingInfo.first,
+ armnn::Tensor(bindingInfo.second,
+ outputStorage.at(it.first).data()) });
+ }
+
+ m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+ // Checks the results.
+ for (auto&& it : expectedOutputData)
+ {
+ std::vector<DataType2> out = outputStorage.at(it.first);
+ for (unsigned int i = 0; i < out.size(); ++i)
+ {
+ BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
+ }
+ }
+}
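
For reference, overload resolution in the DetectionPostProcess test above lands on this last definition: the call supplies two DataType template arguments and no NumOutputDimensions, so output storage is sized from the expected data instead of a fixed-rank boost::multi_array. The call from that test reads:

// uint8 inputs, float32 outputs, output ranks resolved per tensor at runtime.
RunTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float32>(0, input, output);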