ArmNN 21.11
ParserFlatbuffersFixture.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Schema.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>

#include "../TfLiteParser.hpp"

#include <ResolveType.hpp>

#include <test/TensorHelpers.hpp>

#include <fmt/format.h>
#include <doctest/doctest.h>

#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/flexbuffers.h"

#include <schema_generated.h>

using armnnTfLiteParser::ITfLiteParser;
using armnnTfLiteParser::ITfLiteParserPtr;

using TensorRawPtr = const tflite::TensorT *;
struct ParserFlatbuffersFixture
{
    ParserFlatbuffersFixture() :
        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
        m_NetworkIdentifier(0),
        m_DynamicNetworkIdentifier(1)
    {
        ITfLiteParser::TfLiteParserOptions options;
        options.m_StandInLayerForUnsupported = true;
        options.m_InferAndValidate = true;

        m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
            armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options));
    }

    std::vector<uint8_t> m_GraphBinary;
    std::string m_JsonString;
    armnn::IRuntimePtr m_Runtime;
    armnn::NetworkId m_NetworkIdentifier;
    armnn::NetworkId m_DynamicNetworkIdentifier;
    bool m_TestDynamic;
    std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl> m_Parser;

    /// If the single-input-single-output overload of Setup() is called, these store the input and output names
    /// so they don't need to be passed to the single-input-single-output overload of RunTest().
    std::string m_SingleInputName;
    std::string m_SingleOutputName;

    void Setup(bool testDynamic = true)
    {
        m_TestDynamic = testDynamic;
        loadNetwork(m_NetworkIdentifier, false);

        if (m_TestDynamic)
        {
            loadNetwork(m_DynamicNetworkIdentifier, true);
        }
    }

    /// Unpacks the binary model and gives every tensor that lacks a shape_signature a
    /// signature of all -1, which the parser treats as a dynamic (unknown) shape.
    std::unique_ptr<tflite::ModelT> MakeModelDynamic(std::vector<uint8_t> graphBinary)
    {
        const uint8_t* binaryContent = graphBinary.data();
        const size_t len = graphBinary.size();
        if (binaryContent == nullptr)
        {
            throw armnn::InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                                              CHECK_LOCATION().AsString()));
        }
        flatbuffers::Verifier verifier(binaryContent, len);
        if (!verifier.VerifyBuffer<tflite::Model>())
        {
            throw armnn::ParseException(fmt::format("Buffer doesn't conform to the expected TensorFlow Lite "
                                                    "flatbuffers format. size:{} {}",
                                                    len,
                                                    CHECK_LOCATION().AsString()));
        }
        auto model = tflite::UnPackModel(binaryContent);

        for (auto const& subgraph : model->subgraphs)
        {
            std::vector<int32_t> inputIds = subgraph->inputs;
            for (unsigned int tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
            {
                // Skip tensor indices that are network inputs.
                if (std::find(inputIds.begin(), inputIds.end(), tensorIndex) != inputIds.end())
                {
                    continue;
                }
                for (auto const& tensor : subgraph->tensors)
                {
                    if (tensor->shape_signature.size() != 0)
                    {
                        continue;
                    }

                    // A -1 entry per dimension marks the whole shape as dynamic.
                    for (unsigned int i = 0; i < tensor->shape.size(); ++i)
                    {
                        tensor->shape_signature.push_back(-1);
                    }
                }
            }
        }

        return model;
    }
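
    /// Illustrative example (not part of the original file): after MakeModelDynamic, a
    /// tensor whose shape_signature was empty keeps its shape but gains an all -1
    /// signature. Index 1 below is a hypothetical non-input tensor:
    /// @code
    /// auto model = MakeModelDynamic(m_GraphBinary);
    /// const auto& tensor = model->subgraphs[0]->tensors[1];
    /// // tensor->shape           == {1, 2, 2, 1}   (unchanged)
    /// // tensor->shape_signature == {-1, -1, -1, -1}
    /// @endcode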

    void loadNetwork(armnn::NetworkId networkId, bool loadDynamic)
    {
        bool ok = ReadStringToBinary();
        if (!ok)
        {
            throw armnn::Exception("LoadNetwork failed while reading binary input");
        }

        armnn::INetworkPtr network = loadDynamic ? m_Parser->LoadModel(MakeModelDynamic(m_GraphBinary))
                                                 : m_Parser->CreateNetworkFromBinary(m_GraphBinary);

        if (!network)
        {
            throw armnn::Exception("The parser failed to create an ArmNN network");
        }

        auto optimized = Optimize(*network, { armnn::Compute::CpuRef },
                                  m_Runtime->GetDeviceSpec());

        std::string errorMessage;
        armnn::Status ret = m_Runtime->LoadNetwork(networkId, std::move(optimized), errorMessage);

        if (ret != armnn::Status::Success)
        {
            throw armnn::Exception(
                fmt::format("The runtime failed to load the network. "
                            "Error was: {}. in {} [{}:{}]",
                            errorMessage,
                            __func__,
                            __FILE__,
                            __LINE__));
        }
    }

    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
    {
        // Store the input and output name so they don't need to be passed to the
        // single-input-single-output overload of RunTest().
        m_SingleInputName = inputName;
        m_SingleOutputName = outputName;
        Setup();
    }
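
    /// Usage sketch (illustrative; the fixture name and JSON content are hypothetical):
    /// a derived test fixture assigns a TfLite model in JSON form to m_JsonString and
    /// then calls one of the Setup overloads:
    /// @code
    /// struct AddFixture : public ParserFlatbuffersFixture
    /// {
    ///     AddFixture()
    ///     {
    ///         m_JsonString = R"( ...TFLite model JSON... )";
    ///         SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    ///     }
    /// };
    /// @endcode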

    bool ReadStringToBinary()
    {
        std::string schemafile(g_TfLiteSchemaText, g_TfLiteSchemaText + g_TfLiteSchemaText_len);

        // Parse the schema first, so we can use it to parse the data after.
        flatbuffers::Parser parser;

        bool ok = parser.Parse(schemafile.c_str());
        ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");

        ok &= parser.Parse(m_JsonString.c_str());
        ARMNN_ASSERT_MSG(ok, "Failed to parse json input");

        if (!ok)
        {
            return false;
        }

        {
            const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
            size_t size = static_cast<size_t>(parser.builder_.GetSize());
            m_GraphBinary.assign(bufferPtr, bufferPtr + size);
        }
        return ok;
    }
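
    /// Illustrative note (not from the original file): m_JsonString is expected to hold
    /// the model in FlatBuffers JSON text form conforming to the TFLite schema;
    /// ReadStringToBinary() compiles it into the binary flatbuffer the parser consumes.
    /// A hypothetical skeleton:
    /// @code
    /// m_JsonString = R"(
    ///     {
    ///         "version": 3,
    ///         "operator_codes": [ { "builtin_code": "ADD" } ],
    ///         "subgraphs": [ { "tensors": [ ... ], "operators": [ ... ] } ],
    ///         "buffers": [ {} ]
    ///     }
    /// )";
    /// @endcode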

    /// Executes the network with the given input tensor and checks the result against the given output tensor.
    /// This assumes the network has a single input and a single output.
    template <std::size_t NumOutputDimensions,
              armnn::DataType ArmnnType>
    void RunTest(size_t subgraphId,
                 const std::vector<armnn::ResolveType<ArmnnType>>& inputData,
                 const std::vector<armnn::ResolveType<ArmnnType>>& expectedOutputData);

    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name.
    template <std::size_t NumOutputDimensions,
              armnn::DataType ArmnnType>
    void RunTest(size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType>>>& inputData,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType>>>& expectedOutputData);

    /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name, and allows
    /// the input datatype to differ from the output datatype.
    template <std::size_t NumOutputDimensions,
              armnn::DataType ArmnnType1,
              armnn::DataType ArmnnType2>
    void RunTest(size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData,
                 bool isDynamic = false);

    /// Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes.
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports two sets of inputs with different datatypes, identified by name, and allows
    /// the input datatypes to differ from the output datatype.
    template <std::size_t NumOutputDimensions,
              armnn::DataType inputType1,
              armnn::DataType inputType2,
              armnn::DataType outputType>
    void RunTest(size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<inputType1>>>& input1Data,
                 const std::map<std::string, std::vector<armnn::ResolveType<inputType2>>>& input2Data,
                 const std::map<std::string, std::vector<armnn::ResolveType<outputType>>>& expectedOutputData);

    /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name, and allows
    /// the input datatype to differ from the output datatype.
    template <armnn::DataType ArmnnType1,
              armnn::DataType ArmnnType2>
    void RunTest(std::size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData);

    static inline std::string GenerateDetectionPostProcessJsonString(
        const armnn::DetectionPostProcessDescriptor& descriptor)
    {
        flexbuffers::Builder detectPostProcess;
        detectPostProcess.Map([&]() {
            detectPostProcess.Bool("use_regular_nms", descriptor.m_UseRegularNms);
            detectPostProcess.Int("max_detections", descriptor.m_MaxDetections);
            detectPostProcess.Int("max_classes_per_detection", descriptor.m_MaxClassesPerDetection);
            detectPostProcess.Int("detections_per_class", descriptor.m_DetectionsPerClass);
            detectPostProcess.Int("num_classes", descriptor.m_NumClasses);
            detectPostProcess.Float("nms_score_threshold", descriptor.m_NmsScoreThreshold);
            detectPostProcess.Float("nms_iou_threshold", descriptor.m_NmsIouThreshold);
            detectPostProcess.Float("h_scale", descriptor.m_ScaleH);
            detectPostProcess.Float("w_scale", descriptor.m_ScaleW);
            detectPostProcess.Float("x_scale", descriptor.m_ScaleX);
            detectPostProcess.Float("y_scale", descriptor.m_ScaleY);
        });
        detectPostProcess.Finish();

        // Render the flexbuffer as a comma-separated list of byte values.
        std::stringstream strStream;
        std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
        std::copy(buffer.begin(), buffer.end(), std::ostream_iterator<int>(strStream, ","));

        return strStream.str();
    }
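
    /// Usage sketch (illustrative, not from the original file): the returned byte list is
    /// typically spliced into a custom operator's "custom_options" array in the JSON model:
    /// @code
    /// armnn::DetectionPostProcessDescriptor descriptor;   // hypothetical settings
    /// descriptor.m_MaxDetections = 3;
    /// descriptor.m_NumClasses    = 2;
    /// std::string customOptions = GenerateDetectionPostProcessJsonString(descriptor);
    /// std::string operatorJson  = R"("custom_options": [ )" + customOptions + R"( ],)";
    /// @endcode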

    void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
                      tflite::TensorType tensorType, uint32_t buffer, const std::string& name,
                      const std::vector<float>& min, const std::vector<float>& max,
                      const std::vector<float>& scale, const std::vector<int64_t>& zeroPoint)
    {
        CHECK(tensors);
        CHECK_EQ(shapeSize, tensors->shape.size());
        CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
        CHECK_EQ(tensorType, tensors->type);
        CHECK_EQ(buffer, tensors->buffer);
        CHECK_EQ(name, tensors->name);
        CHECK(tensors->quantization);
        CHECK(std::equal(min.begin(), min.end(), tensors->quantization->min.begin(),
                         tensors->quantization->min.end()));
        CHECK(std::equal(max.begin(), max.end(), tensors->quantization->max.begin(),
                         tensors->quantization->max.end()));
        CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization->scale.begin(),
                         tensors->quantization->scale.end()));
        CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
                         tensors->quantization->zero_point.begin(),
                         tensors->quantization->zero_point.end()));
    }

private:
    /// Fills the InputTensors with the given input data.
    template <armnn::DataType dataType>
    void FillInputTensors(armnn::InputTensors& inputTensors,
                          const std::map<std::string, std::vector<armnn::ResolveType<dataType>>>& inputData,
                          size_t subgraphId);
};

/// Fills the InputTensors with the given input data.
template <armnn::DataType dataType>
void ParserFlatbuffersFixture::FillInputTensors(
    armnn::InputTensors& inputTensors,
    const std::map<std::string, std::vector<armnn::ResolveType<dataType>>>& inputData,
    size_t subgraphId)
{
    for (auto&& it : inputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
        bindingInfo.second.SetConstant(true);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, dataType);
        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
    }
}

/// Single Input, Single Output.
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This overload assumes the network has a single input and a single output.
template <std::size_t NumOutputDimensions,
          armnn::DataType armnnType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::vector<armnn::ResolveType<armnnType>>& inputData,
                                       const std::vector<armnn::ResolveType<armnnType>>& expectedOutputData)
{
    RunTest<NumOutputDimensions, armnnType>(subgraphId,
                                            { { m_SingleInputName, inputData } },
                                            { { m_SingleOutputName, expectedOutputData } });
}
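
/// Example call (illustrative, hypothetical data): a Float32 network set up with
/// SetupSingleInputSingleOutput whose output tensor has 4 dimensions:
/// @code
/// RunTest<4, armnn::DataType::Float32>(0,
///                                      { 1.0f, 2.0f, 3.0f, 4.0f },   // input
///                                      { 2.0f, 4.0f, 6.0f, 8.0f });  // expected output
/// @endcode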

/// Multiple Inputs, Multiple Outputs.
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
template <std::size_t NumOutputDimensions,
          armnn::DataType armnnType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType>>>& inputData,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType>>>& expectedOutputData)
{
    RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
}
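
/// Example call (illustrative, hypothetical tensor names): inputs and expected outputs
/// are keyed by tensor name, so multi-input graphs take one map entry per tensor:
/// @code
/// RunTest<2, armnn::DataType::Float32>(0,
///     { { "inputX", { 1.0f, 2.0f } }, { "inputY", { 3.0f, 4.0f } } },
///     { { "output", { 4.0f, 6.0f } } });
/// @endcode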

/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes.
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name, and allows
/// the input datatype to differ from the output datatype.
template <std::size_t NumOutputDimensions,
          armnn::DataType armnnType1,
          armnn::DataType armnnType2>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData,
                                       bool isDynamic)
{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that output tensors have the correct number of dimensions (NumOutputDimensions specified in test).
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
                      fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                                  NumOutputDimensions,
                                  outputNumDimensions,
                                  it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Set a flag so that the correct comparison function is called if the output is boolean.
    bool isBoolean = (armnnType2 == armnn::DataType::Boolean);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                     isBoolean, isDynamic);
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }

    if (isDynamic)
    {
        // Run the same inputs through the dynamically-shaped copy of the network and re-check.
        m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors);

        // Compare each output tensor to the expected values.
        for (auto&& it : expectedOutputData)
        {
            armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
            auto outputExpected = it.second;
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                         false, isDynamic);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }
}

/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name, and allows
/// the input datatype to differ from the output datatype.
template <armnn::DataType armnnType1,
          armnn::DataType armnnType2>
void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData)
{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    armnn::OutputTensors outputTensors;
    outputTensors.reserve(expectedOutputData.size());
    std::map<std::string, std::vector<DataType2>> outputStorage;
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);

        std::vector<DataType2> out(it.second.size());
        outputStorage.emplace(it.first, out);
        outputTensors.push_back({ bindingInfo.first,
                                  armnn::Tensor(bindingInfo.second,
                                                outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Check the results element-wise with a small tolerance.
    for (auto&& it : expectedOutputData)
    {
        std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
        for (unsigned int i = 0; i < out.size(); ++i)
        {
            CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
        }
    }
}

/// Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes.
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports two sets of inputs with different datatypes, identified by name, and allows
/// the input datatypes to differ from the output datatype.
template <std::size_t NumOutputDimensions,
          armnn::DataType inputType1,
          armnn::DataType inputType2,
          armnn::DataType outputType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<inputType1>>>& input1Data,
                                       const std::map<std::string, std::vector<armnn::ResolveType<inputType2>>>& input2Data,
                                       const std::map<std::string, std::vector<armnn::ResolveType<outputType>>>& expectedOutputData)
{
    using DataType2 = armnn::ResolveType<outputType>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
    FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that output tensors have the correct number of dimensions (NumOutputDimensions specified in test).
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
                      fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                                  NumOutputDimensions,
                                  outputNumDimensions,
                                  it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Set a flag so that the correct comparison function is called if the output is boolean.
    bool isBoolean = (outputType == armnn::DataType::Boolean);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                     isBoolean);
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }
}
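
/// Example call (illustrative, hypothetical tensor names): mixed input datatypes, such as
/// Float32 data alongside a Signed32 axis tensor:
/// @code
/// RunTest<4, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
///     0,
///     { { "data", { 1.0f, 2.0f, 3.0f, 4.0f } } },
///     { { "axis", { 0 } } },
///     { { "output", { 1.0f, 2.0f, 3.0f, 4.0f } } });
/// @endcode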