15 #include "../TfLiteParser.hpp" 21 #include <fmt/format.h> 22 #include <doctest/doctest.h> 24 #include "flatbuffers/idl.h" 25 #include "flatbuffers/util.h" 26 #include "flatbuffers/flexbuffers.h" 28 #include <schema_generated.h> 42 ITfLiteParser::TfLiteParserOptions options;
43 options.m_StandInLayerForUnsupported =
true;
44 options.m_InferAndValidate =
true;
46 m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
56 std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl>
m_Parser;
63 void Setup(
bool testDynamic =
true)
65 m_TestDynamic = testDynamic;
76 const uint8_t* binaryContent = graphBinary.data();
77 const size_t len = graphBinary.size();
78 if (binaryContent ==
nullptr)
83 flatbuffers::Verifier verifier(binaryContent, len);
84 if (verifier.VerifyBuffer<tflite::Model>() ==
false)
87 "flatbuffers format. size:{} {}",
91 auto model = tflite::UnPackModel(binaryContent);
93 for (
auto const& subgraph : model->subgraphs)
95 std::vector<int32_t> inputIds = subgraph->inputs;
96 for (
unsigned int tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
98 if (std::find(inputIds.begin(), inputIds.end(), tensorIndex) != inputIds.end())
102 for (
auto const& tensor : subgraph->tensors)
104 if (tensor->shape_signature.size() != 0)
109 for (
unsigned int i = 0; i < tensor->shape.size(); ++i)
111 tensor->shape_signature.push_back(-1);
128 : m_Parser->CreateNetworkFromBinary(m_GraphBinary);
135 m_Runtime->GetDeviceSpec());
136 std::string errorMessage;
138 armnn::Status ret = m_Runtime->LoadNetwork(networkId, move(optimized), errorMessage);
143 fmt::format(
"The runtime failed to load the network. " 144 "Error was: {}. in {} [{}:{}]",
155 m_SingleInputName = inputName;
156 m_SingleOutputName = outputName;
165 flatbuffers::Parser parser;
167 bool ok = parser.Parse(schemafile.c_str());
168 CHECK_MESSAGE(ok, std::string(
"Failed to parse schema file. Error was: " + parser.error_).c_str());
170 ok = parser.Parse(m_JsonString.c_str());
171 CHECK_MESSAGE(ok, std::string(
"Failed to parse json input. Error was: " + parser.error_).c_str());
174 const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
175 size_t size =
static_cast<size_t>(parser.builder_.GetSize());
176 m_GraphBinary.assign(bufferPtr, bufferPtr+size);
183 template <std::size_t NumOutputDimensions,
185 void RunTest(
size_t subgraphId,
191 template <std::size_t NumOutputDimensions,
193 void RunTest(
size_t subgraphId,
201 template <std::size_t NumOutputDimensions,
204 void RunTest(
size_t subgraphId,
207 bool isDynamic =
false);
213 template <std::size_t NumOutputDimensions,
217 void RunTest(
size_t subgraphId,
228 void RunTest(std::size_t subgraphId,
235 flexbuffers::Builder detectPostProcess;
236 detectPostProcess.Map([&]() {
241 detectPostProcess.Int(
"num_classes", descriptor.
m_NumClasses);
244 detectPostProcess.Float(
"h_scale", descriptor.
m_ScaleH);
245 detectPostProcess.Float(
"w_scale", descriptor.
m_ScaleW);
246 detectPostProcess.Float(
"x_scale", descriptor.
m_ScaleX);
247 detectPostProcess.Float(
"y_scale", descriptor.
m_ScaleY);
249 detectPostProcess.Finish();
252 std::stringstream strStream;
253 std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
254 std::copy(buffer.begin(), buffer.end(),std::ostream_iterator<int>(strStream,
","));
256 return strStream.str();
260 tflite::TensorType tensorType, uint32_t buffer,
const std::string& name,
261 const std::vector<float>& min,
const std::vector<float>& max,
262 const std::vector<float>& scale,
const std::vector<int64_t>& zeroPoint)
265 CHECK_EQ(shapeSize, tensors->shape.size());
266 CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
267 CHECK_EQ(tensorType, tensors->type);
268 CHECK_EQ(buffer, tensors->buffer);
269 CHECK_EQ(name, tensors->name);
270 CHECK(tensors->quantization);
271 CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
272 tensors->quantization.get()->min.end()));
273 CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
274 tensors->quantization.get()->max.end()));
275 CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
276 tensors->quantization.get()->scale.end()));
277 CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
278 tensors->quantization.get()->zero_point.begin(),
279 tensors->quantization.get()->zero_point.end()));
284 template <armnn::DataType dataType>
291 template <armnn::DataType dataType>
292 void ParserFlatbuffersFixture::FillInputTensors(
297 for (
auto&& it : inputData)
300 bindingInfo.second.SetConstant(
true);
302 inputTensors.push_back({ bindingInfo.first,
armnn::ConstTensor(bindingInfo.second, it.second.data()) });
309 template <std::size_t NumOutputDimensions,
315 RunTest<NumOutputDimensions, armnnType>(subgraphId,
323 template <std::size_t NumOutputDimensions,
329 RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
336 template <std::size_t NumOutputDimensions,
348 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
351 std::map<std::string, std::vector<DataType2>> outputStorage;
353 for (
auto&& it : expectedOutputData)
360 CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
361 fmt::format(
"Number of dimensions expected {}, but got {} for output layer {}",
367 outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.
GetNumElements()));
368 outputTensors.push_back(
369 { outputBindingId,
armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
378 for (
auto&& it : expectedOutputData)
381 auto outputExpected = it.second;
382 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
383 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
384 isBoolean, isDynamic);
385 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
393 for (
auto&& it : expectedOutputData)
396 auto outputExpected = it.second;
397 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
398 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
400 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
419 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
422 outputTensors.reserve(expectedOutputData.size());
423 std::map<std::string, std::vector<DataType2>> outputStorage;
424 for (
auto&& it : expectedOutputData)
429 std::vector<DataType2> out(it.second.size());
430 outputStorage.emplace(it.first, out);
431 outputTensors.push_back({ bindingInfo.first,
433 outputStorage.at(it.first).data()) });
439 for (
auto&& it : expectedOutputData)
441 std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
443 for (
unsigned int i = 0; i < out.size(); ++i)
445 CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
455 template <std::size_t NumOutputDimensions,
468 FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
469 FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);
472 std::map<std::string, std::vector<DataType2>> outputStorage;
474 for (
auto&& it : expectedOutputData)
481 CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
482 fmt::format(
"Number of dimensions expected {}, but got {} for output layer {}",
488 outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.
GetNumElements()));
489 outputTensors.push_back(
490 { outputBindingId,
armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
499 for (
auto&& it : expectedOutputData)
502 auto outputExpected = it.second;
503 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
504 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
506 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
armnn::NetworkId m_NetworkIdentifier
float m_ScaleW
Center size encoding scale weight.
static std::string GenerateDetectionPostProcessJsonString(const armnn::DetectionPostProcessDescriptor &descriptor)
CPU Execution: Reference C++ kernels.
void loadNetwork(armnn::NetworkId networkId, bool loadDynamic)
float m_ScaleX
Center size encoding scale x.
ParserFlatbuffersFixture()
void CheckTensors(const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
typename ResolveTypeImpl< DT >::Type ResolveType
bool ReadStringToBinary()
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
Copyright (c) 2021 ARM Limited and Contributors.
void RunTest(size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tensor.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
uint32_t m_MaxDetections
Maximum numbers of detections.
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
float m_NmsIouThreshold
Intersection over union threshold.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
armnnSerializer::TensorInfo * TensorRawPtr
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
uint32_t m_NumClasses
Number of classes.
bool m_UseRegularNms
Use Regular NMS.
armnn::PredicateResult CompareTensors(const std::vector< T > &actualData, const std::vector< T > &expectedData, const armnn::TensorShape &actualShape, const armnn::TensorShape &expectedShape, bool compareBoolean=false, bool isDynamic=false)
std::string m_SingleOutputName
std::vector< uint8_t > m_GraphBinary
armnn::IRuntimePtr m_Runtime
float m_ScaleH
Center size encoding scale height.
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
armnn::NetworkId m_DynamicNetworkIdentifier
Base class for all ArmNN exceptions so that users can filter to just those.
void Setup(bool testDynamic=true)
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
float m_ScaleY
Center size encoding scale y.
unsigned char g_TfLiteSchemaText[]
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
std::unique_ptr< tflite::ModelT > MakeModelDynamic(std::vector< uint8_t > graphBinary)
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and output layer names.
unsigned int GetNumDimensions() const
unsigned int g_TfLiteSchemaText_len
unsigned int GetNumElements() const