ArmNN 22.02
ParserFlatbuffersFixture.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Schema.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>

#include "../TfLiteParser.hpp"

#include <ResolveType.hpp>

#include <fmt/format.h>
#include <doctest/doctest.h>

#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/flexbuffers.h"

#include <schema_generated.h>

using armnnTfLiteParser::ITfLiteParser;
using armnnTfLiteParser::ITfLiteParserPtr;

using TensorRawPtr = const tflite::TensorT *;
struct ParserFlatbuffersFixture
{
    explicit ParserFlatbuffersFixture() :
        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
        m_NetworkIdentifier(0),
        m_DynamicNetworkIdentifier(1)
    {
        // Let the parser stand in for unsupported operators and infer/validate
        // tensor shapes, so partially supported models can still be exercised.
        ITfLiteParser::TfLiteParserOptions options;
        options.m_StandInLayerForUnsupported = true;
        options.m_InferAndValidate = true;

        m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
                       armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options));
    }

    std::vector<uint8_t> m_GraphBinary;
    std::string m_JsonString;
    armnn::IRuntimePtr m_Runtime;
    armnn::NetworkId m_NetworkIdentifier;
    armnn::NetworkId m_DynamicNetworkIdentifier;
    bool m_TestDynamic;
    std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl> m_Parser;

    /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
    /// so they don't need to be passed to the single-input-single-output overload of RunTest().
    std::string m_SingleInputName;
    std::string m_SingleOutputName;

    void Setup(bool testDynamic = true)
    {
        m_TestDynamic = testDynamic;
        loadNetwork(m_NetworkIdentifier, false);

        if (m_TestDynamic)
        {
            loadNetwork(m_DynamicNetworkIdentifier, true);
        }
    }

    std::unique_ptr<tflite::ModelT> MakeModelDynamic(std::vector<uint8_t> graphBinary)
    {
        const uint8_t* binaryContent = graphBinary.data();
        const size_t len = graphBinary.size();
        if (binaryContent == nullptr)
        {
            throw armnn::InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                                              CHECK_LOCATION().AsString()));
        }
        flatbuffers::Verifier verifier(binaryContent, len);
        if (verifier.VerifyBuffer<tflite::Model>() == false)
        {
            throw armnn::ParseException(fmt::format("Buffer doesn't conform to the expected TensorFlow Lite "
                                                    "flatbuffers format. size:{} {}",
                                                    len,
                                                    CHECK_LOCATION().AsString()));
        }
        auto model = tflite::UnPackModel(binaryContent);

        // Mark every non-input tensor as fully dynamic by giving it a shape
        // signature of all -1s; the subgraph inputs keep their static shapes.
        for (auto const& subgraph : model->subgraphs)
        {
            const std::vector<int32_t>& inputIds = subgraph->inputs;
            for (size_t tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
            {
                if (std::find(inputIds.begin(), inputIds.end(),
                              static_cast<int32_t>(tensorIndex)) != inputIds.end())
                {
                    continue;
                }
                auto& tensor = subgraph->tensors[tensorIndex];
                if (tensor->shape_signature.empty())
                {
                    tensor->shape_signature.assign(tensor->shape.size(), -1);
                }
            }
        }

        return model;
    }
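
    // Illustration (not from the original file): for a tensor whose static
    // shape in the model JSON is
    //
    //     "shape": [ 1, 2, 2, 1 ]
    //
    // MakeModelDynamic leaves "shape" untouched but fills in
    //
    //     "shape_signature": [ -1, -1, -1, -1 ]
    //
    // In the TfLite schema a -1 entry in shape_signature marks that dimension
    // as dynamic, so parsing the returned model exercises the shape-inference
    // path enabled by m_InferAndValidate instead of the static shapes.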

    void loadNetwork(armnn::NetworkId networkId, bool loadDynamic)
    {
        if (!ReadStringToBinary())
        {
            throw armnn::Exception("LoadNetwork failed while reading binary input");
        }

        armnn::INetworkPtr network = loadDynamic ? m_Parser->LoadModel(MakeModelDynamic(m_GraphBinary))
                                                 : m_Parser->CreateNetworkFromBinary(m_GraphBinary);

        if (!network)
        {
            throw armnn::Exception("The parser failed to create an ArmNN network");
        }

        auto optimized = Optimize(*network, { armnn::Compute::CpuRef },
                                  m_Runtime->GetDeviceSpec());
        std::string errorMessage;

        armnn::Status ret = m_Runtime->LoadNetwork(networkId, std::move(optimized), errorMessage);

        if (ret != armnn::Status::Success)
        {
            throw armnn::Exception(
                fmt::format("The runtime failed to load the network. "
                            "Error was: {}. in {} [{}:{}]",
                            errorMessage,
                            __func__,
                            __FILE__,
                            __LINE__));
        }
    }

    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
    {
        // Store the input and output name so they don't need to be passed to the
        // single-input-single-output overload of RunTest().
        m_SingleInputName = inputName;
        m_SingleOutputName = outputName;
        Setup();
    }
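
    // Illustrative sketch (the JSON body and tensor names are invented, not
    // from this file): a test fixture typically assigns a schema-conformant
    // model to m_JsonString in its constructor and then calls this helper,
    //
    //     m_JsonString = R"({ "version": 3, ... })";
    //     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    //
    // after which the single-input/single-output RunTest() overload can be
    // used without repeating the tensor names.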

    bool ReadStringToBinary()
    {
        std::string schemafile(&g_TfLiteSchemaText[0], &g_TfLiteSchemaText[g_TfLiteSchemaText_len]);

        // Parse the schema first, so it can then be used to parse the JSON data.
        flatbuffers::Parser parser;

        bool ok = parser.Parse(schemafile.c_str());
        CHECK_MESSAGE(ok, std::string("Failed to parse schema file. Error was: " + parser.error_).c_str());

        ok = parser.Parse(m_JsonString.c_str());
        CHECK_MESSAGE(ok, std::string("Failed to parse json input. Error was: " + parser.error_).c_str());

        {
            const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
            size_t size = static_cast<size_t>(parser.builder_.GetSize());
            m_GraphBinary.assign(bufferPtr, bufferPtr + size);
        }
        return ok;
    }

    /// Executes the network with the given input tensor and checks the result against the given output tensor.
    /// This assumes the network has a single input and a single output.
    template <std::size_t NumOutputDimensions,
              armnn::DataType ArmnnType>
    void RunTest(size_t subgraphId,
                 const std::vector<armnn::ResolveType<ArmnnType>>& inputData,
                 const std::vector<armnn::ResolveType<ArmnnType>>& expectedOutputData);

    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name.
    template <std::size_t NumOutputDimensions,
              armnn::DataType ArmnnType>
    void RunTest(size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType>>>& inputData,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType>>>& expectedOutputData);

    /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name, and allows the input
    /// data type to differ from the output data type.
    template <std::size_t NumOutputDimensions,
              armnn::DataType ArmnnType1,
              armnn::DataType ArmnnType2>
    void RunTest(size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData,
                 bool isDynamic = false);

    /// Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports two sets of inputs with different data types, identified by name, and allows the
    /// input data types to differ from the output data type.
    template <std::size_t NumOutputDimensions,
              armnn::DataType inputType1,
              armnn::DataType inputType2,
              armnn::DataType outputType>
    void RunTest(size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<inputType1>>>& input1Data,
                 const std::map<std::string, std::vector<armnn::ResolveType<inputType2>>>& input2Data,
                 const std::map<std::string, std::vector<armnn::ResolveType<outputType>>>& expectedOutputData);

    /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
    /// Executes the network with the given input tensors and checks the results against the given output tensors.
    /// This overload supports multiple inputs and multiple outputs, identified by name, and allows the input
    /// data type to differ from the output data type.
    template <armnn::DataType ArmnnType1,
              armnn::DataType ArmnnType2>
    void RunTest(std::size_t subgraphId,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData);

    static inline std::string GenerateDetectionPostProcessJsonString(
        const armnn::DetectionPostProcessDescriptor& descriptor)
    {
        flexbuffers::Builder detectPostProcess;
        detectPostProcess.Map([&]() {
            detectPostProcess.Bool("use_regular_nms", descriptor.m_UseRegularNms);
            detectPostProcess.Int("max_detections", descriptor.m_MaxDetections);
            detectPostProcess.Int("max_classes_per_detection", descriptor.m_MaxClassesPerDetection);
            detectPostProcess.Int("detections_per_class", descriptor.m_DetectionsPerClass);
            detectPostProcess.Int("num_classes", descriptor.m_NumClasses);
            detectPostProcess.Float("nms_score_threshold", descriptor.m_NmsScoreThreshold);
            detectPostProcess.Float("nms_iou_threshold", descriptor.m_NmsIouThreshold);
            detectPostProcess.Float("h_scale", descriptor.m_ScaleH);
            detectPostProcess.Float("w_scale", descriptor.m_ScaleW);
            detectPostProcess.Float("x_scale", descriptor.m_ScaleX);
            detectPostProcess.Float("y_scale", descriptor.m_ScaleY);
        });
        detectPostProcess.Finish();

        // Render the flexbuffer as a comma-separated list of byte values, ready
        // to be embedded in a "custom_options" array in the model JSON.
        std::stringstream strStream;
        std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
        std::copy(buffer.begin(), buffer.end(), std::ostream_iterator<int>(strStream, ","));

        return strStream.str();
    }
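
    // Illustrative use (patterned on the DetectionPostProcess parser tests;
    // the surrounding JSON is elided and the values are invented): the
    // returned byte list is spliced into the operator's custom_options field
    // of the model JSON:
    //
    //     armnn::DetectionPostProcessDescriptor descriptor;
    //     descriptor.m_MaxDetections     = 3;
    //     descriptor.m_NmsScoreThreshold = 0.5f;
    //     // ... remaining descriptor fields ...
    //
    //     m_JsonString = R"(... "custom_options": [ )"
    //                    + GenerateDetectionPostProcessJsonString(descriptor)
    //                    + R"( ], ...)";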

    void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
                      tflite::TensorType tensorType, uint32_t buffer, const std::string& name,
                      const std::vector<float>& min, const std::vector<float>& max,
                      const std::vector<float>& scale, const std::vector<int64_t>& zeroPoint)
    {
        CHECK(tensors);
        CHECK_EQ(shapeSize, tensors->shape.size());
        CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
        CHECK_EQ(tensorType, tensors->type);
        CHECK_EQ(buffer, tensors->buffer);
        CHECK_EQ(name, tensors->name);
        CHECK(tensors->quantization);
        CHECK(std::equal(min.begin(), min.end(),
                         tensors->quantization->min.begin(), tensors->quantization->min.end()));
        CHECK(std::equal(max.begin(), max.end(),
                         tensors->quantization->max.begin(), tensors->quantization->max.end()));
        CHECK(std::equal(scale.begin(), scale.end(),
                         tensors->quantization->scale.begin(), tensors->quantization->scale.end()));
        CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
                         tensors->quantization->zero_point.begin(), tensors->quantization->zero_point.end()));
    }
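
    // Example call (hypothetical values): assert that a parsed tensor is a
    // 1x2x2x1 UINT8 tensor named "inputTensor", backed by buffer 1 and
    // quantized with min 0, max 255, scale 1 and zero point 0:
    //
    //     CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType_UINT8,
    //                  1, "inputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });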

private:
    /// Fills the InputTensors with the given input data.
    template <armnn::DataType dataType>
    void FillInputTensors(armnn::InputTensors& inputTensors,
                          const std::map<std::string, std::vector<armnn::ResolveType<dataType>>>& inputData,
                          size_t subgraphId);
};

/// Fills the InputTensors with the given input data.
template <armnn::DataType dataType>
void ParserFlatbuffersFixture::FillInputTensors(
    armnn::InputTensors& inputTensors,
    const std::map<std::string, std::vector<armnn::ResolveType<dataType>>>& inputData,
    size_t subgraphId)
{
    for (auto&& it : inputData)
    {
        // Bind each named input, mark its TensorInfo as constant data and
        // check the data type before wrapping the raw vector as a ConstTensor.
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
        bindingInfo.second.SetConstant(true);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, dataType);
        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
    }
}

/// Single Input, Single Output
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This overload assumes the network has a single input and a single output.
template <std::size_t NumOutputDimensions,
          armnn::DataType armnnType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::vector<armnn::ResolveType<armnnType>>& inputData,
                                       const std::vector<armnn::ResolveType<armnnType>>& expectedOutputData)
{
    RunTest<NumOutputDimensions, armnnType>(subgraphId,
                                            { { m_SingleInputName, inputData } },
                                            { { m_SingleOutputName, expectedOutputData } });
}
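
// Note (illustrative, with invented names and values): this overload reads
// m_SingleInputName and m_SingleOutputName, so it is only valid after
// SetupSingleInputSingleOutput() has been called:
//
//     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
//     RunTest<2, armnn::DataType::Float32>(0, { 1.0f, 2.0f }, { 2.0f, 4.0f });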

/// Multiple Inputs, Multiple Outputs
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
template <std::size_t NumOutputDimensions,
          armnn::DataType armnnType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType>>>& inputData,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType>>>& expectedOutputData)
{
    RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
}

/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name, and allows the input
/// data type to differ from the output data type.
template <std::size_t NumOutputDimensions,
          armnn::DataType armnnType1,
          armnn::DataType armnnType2>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData,
                                       bool isDynamic)
{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that the output tensors have the correct number of dimensions
        // (NumOutputDimensions specified in the test).
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
                      fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                                  NumOutputDimensions,
                                  outputNumDimensions,
                                  it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Set a flag so that the correct comparison function is called if the output is boolean.
    bool isBoolean = (armnnType2 == armnn::DataType::Boolean);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                     isBoolean, isDynamic);
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }

    if (isDynamic)
    {
        // Run the dynamically-shaped copy of the network as well and check that
        // it produces the same results.
        m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors);

        // Compare each output tensor to the expected values.
        for (auto&& it : expectedOutputData)
        {
            armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
            auto outputExpected = it.second;
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                         false, isDynamic);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }
}

/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name, and allows the input
/// data type to differ from the output data type.
template <armnn::DataType armnnType1,
          armnn::DataType armnnType2>
void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
                                       const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData)
{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    armnn::OutputTensors outputTensors;
    outputTensors.reserve(expectedOutputData.size());
    std::map<std::string, std::vector<DataType2>> outputStorage;
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);

        std::vector<DataType2> out(it.second.size());
        outputStorage.emplace(it.first, out);
        outputTensors.push_back({ bindingInfo.first,
                                  armnn::Tensor(bindingInfo.second,
                                                outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Check the results, element by element, against the expected values.
    for (auto&& it : expectedOutputData)
    {
        std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
        for (unsigned int i = 0; i < out.size(); ++i)
        {
            CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
        }
    }
}

/// Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports two sets of inputs with different data types, identified by name, and allows the
/// input data types to differ from the output data type.
template <std::size_t NumOutputDimensions,
          armnn::DataType inputType1,
          armnn::DataType inputType2,
          armnn::DataType outputType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                       const std::map<std::string, std::vector<armnn::ResolveType<inputType1>>>& input1Data,
                                       const std::map<std::string, std::vector<armnn::ResolveType<inputType2>>>& input2Data,
                                       const std::map<std::string, std::vector<armnn::ResolveType<outputType>>>& expectedOutputData)
{
    using OutputDataType = armnn::ResolveType<outputType>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
    FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<OutputDataType>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that the output tensors have the correct number of dimensions
        // (NumOutputDimensions specified in the test).
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
                      fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                                  NumOutputDimensions,
                                  outputNumDimensions,
                                  it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType);
        outputStorage.emplace(it.first, std::vector<OutputDataType>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Set a flag so that the correct comparison function is called if the output is boolean.
    bool isBoolean = (outputType == armnn::DataType::Boolean);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                     isBoolean);
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }
}
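
// End-to-end sketch of how this fixture is used by the TfLiteParser tests.
// Everything here is illustrative (the fixture name, JSON body and values are
// invented), but the structure follows the fixture's API as defined above.
//
//     struct TwoInputAddFixture : public ParserFlatbuffersFixture
//     {
//         TwoInputAddFixture()
//         {
//             // Model written as schema-conformant JSON (elided here).
//             m_JsonString = R"({ "version": 3, ... })";
//
//             // Builds the static network and, because testDynamic defaults
//             // to true, a second copy with dynamic shape signatures.
//             Setup();
//         }
//     };
//
//     TEST_CASE_FIXTURE(TwoInputAddFixture, "ParseTwoInputAdd")
//     {
//         RunTest<2, armnn::DataType::Float32>(
//             0,
//             { { "input0", { 1.0f, 2.0f } }, { "input1", { 3.0f, 4.0f } } },
//             { { "output", { 4.0f, 6.0f } } });
//     }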