ArmNN 21.11
ParserFlatbuffersFixture Struct Reference

#include <ParserFlatbuffersFixture.hpp>

Public Member Functions

 ParserFlatbuffersFixture ()
 
void Setup (bool testDynamic=true)
 
std::unique_ptr< tflite::ModelT > MakeModelDynamic (std::vector< uint8_t > graphBinary)
 
void loadNetwork (armnn::NetworkId networkId, bool loadDynamic)
 
void SetupSingleInputSingleOutput (const std::string &inputName, const std::string &outputName)
 
bool ReadStringToBinary ()
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType>
void RunTest (size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
 Executes the network with the given input tensor and checks the result against the given output tensor.
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType >>> &expectedOutputData)
 Executes the network with the given input tensors and checks the results against the given output tensors.
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType1, armnn::DataType ArmnnType2>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType1 >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType2 >>> &expectedOutputData, bool isDynamic=false)
 Multiple Inputs, Multiple Outputs with variable DataTypes and different dimension sizes.
 
template<std::size_t NumOutputDimensions, armnn::DataType inputType1, armnn::DataType inputType2, armnn::DataType outputType>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< inputType1 >>> &input1Data, const std::map< std::string, std::vector< armnn::ResolveType< inputType2 >>> &input2Data, const std::map< std::string, std::vector< armnn::ResolveType< outputType >>> &expectedOutputData)
 Multiple Inputs with different DataTypes, Multiple Outputs with variable DataTypes. Executes the network with the given input tensors and checks the results against the given output tensors.
 
template<armnn::DataType ArmnnType1, armnn::DataType ArmnnType2>
void RunTest (std::size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType1 >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType2 >>> &expectedOutputData)
 Multiple Inputs, Multiple Outputs with variable DataTypes and different dimension sizes.
 
void CheckTensors (const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
 

Static Public Member Functions

static std::string GenerateDetectionPostProcessJsonString (const armnn::DetectionPostProcessDescriptor &descriptor)
 

Public Attributes

std::vector< uint8_t > m_GraphBinary
 
std::string m_JsonString
 
armnn::IRuntimePtr m_Runtime
 
armnn::NetworkId m_NetworkIdentifier
 
armnn::NetworkId m_DynamicNetworkIdentifier
 
bool m_TestDynamic
 
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
 
std::string m_SingleInputName
 If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest().
 
std::string m_SingleOutputName
 

Detailed Description

Definition at line 36 of file ParserFlatbuffersFixture.hpp.
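
A typical parser test derives from this fixture, stores a TfLite model described in flatbuffers JSON in m_JsonString, calls Setup() or SetupSingleInputSingleOutput(), and then drives the network through one of the RunTest() overloads. A minimal sketch follows; the fixture name, tensor names, data values and (elided) model JSON are illustrative, not taken from a real test:

struct SimpleReluFixture : public ParserFlatbuffersFixture
{
    SimpleReluFixture()
    {
        // A complete TfLite model in flatbuffers JSON form goes here.
        m_JsonString = R"({ "version": 3, ... })";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

TEST_SUITE("TensorflowLiteParser")
{
TEST_CASE_FIXTURE(SimpleReluFixture, "ParseRelu")
{
    // Names were stored by SetupSingleInputSingleOutput(), so only the
    // input and expected output data are passed.
    RunTest<4, armnn::DataType::Float32>(0,
                                         { -1.0f, 0.0f, 2.0f, -3.0f },
                                         {  0.0f, 0.0f, 2.0f,  0.0f });
}
}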

Constructor & Destructor Documentation

◆ ParserFlatbuffersFixture()

Definition at line 38 of file ParserFlatbuffersFixture.hpp.

References m_Parser.

ParserFlatbuffersFixture() :
    m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
    m_NetworkIdentifier(0),
    m_DynamicNetworkIdentifier(1)
{
    ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = true;
    options.m_InferAndValidate = true;

    m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
        armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options));
}

Member Function Documentation

◆ CheckTensors()

void CheckTensors ( const TensorRawPtr &  tensors,
size_t  shapeSize,
const std::vector< int32_t > &  shape,
tflite::TensorType  tensorType,
uint32_t  buffer,
const std::string &  name,
const std::vector< float > &  min,
const std::vector< float > &  max,
const std::vector< float > &  scale,
const std::vector< int64_t > &  zeroPoint 
)
inline

Definition at line 265 of file ParserFlatbuffersFixture.hpp.

References m_Parser, and armnn::VerifyTensorInfoDataType().

{
    CHECK(tensors);
    CHECK_EQ(shapeSize, tensors->shape.size());
    CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
    CHECK_EQ(tensorType, tensors->type);
    CHECK_EQ(buffer, tensors->buffer);
    CHECK_EQ(name, tensors->name);
    CHECK(tensors->quantization);
    CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
                     tensors->quantization.get()->min.end()));
    CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
                     tensors->quantization.get()->max.end()));
    CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
                     tensors->quantization.get()->scale.end()));
    CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
                     tensors->quantization.get()->zero_point.begin(),
                     tensors->quantization.get()->zero_point.end()));
}
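
A hedged usage sketch: after ReadStringToBinary() has filled m_GraphBinary, a test can unpack the flatbuffer and verify tensor metadata. This assumes TensorRawPtr aliases const tflite::TensorT*; the tensor index, name and expected values below are hypothetical:

auto model = tflite::UnPackModel(m_GraphBinary.data());
auto& subgraph = model->subgraphs[0];
// First tensor: 4 dimensions, shape {1, 2, 2, 1}, uint8 data in buffer 0,
// quantization range [0, 255] with scale 1.0 and zero point 0.
CheckTensors(subgraph->tensors[0].get(), 4, { 1, 2, 2, 1 },
             tflite::TensorType_UINT8, 0, "inputTensor",
             { 0.0f }, { 255.0f }, { 1.0f }, { 0 });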

◆ GenerateDetectionPostProcessJsonString()

static std::string GenerateDetectionPostProcessJsonString ( const armnn::DetectionPostProcessDescriptor &  descriptor)
inline static

Definition at line 238 of file ParserFlatbuffersFixture.hpp.

References DetectionPostProcessDescriptor::m_DetectionsPerClass, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, and DetectionPostProcessDescriptor::m_UseRegularNms.

{
    flexbuffers::Builder detectPostProcess;
    detectPostProcess.Map([&]() {
        detectPostProcess.Bool("use_regular_nms", descriptor.m_UseRegularNms);
        detectPostProcess.Int("max_detections", descriptor.m_MaxDetections);
        detectPostProcess.Int("max_classes_per_detection", descriptor.m_MaxClassesPerDetection);
        detectPostProcess.Int("detections_per_class", descriptor.m_DetectionsPerClass);
        detectPostProcess.Int("num_classes", descriptor.m_NumClasses);
        detectPostProcess.Float("nms_score_threshold", descriptor.m_NmsScoreThreshold);
        detectPostProcess.Float("nms_iou_threshold", descriptor.m_NmsIouThreshold);
        detectPostProcess.Float("h_scale", descriptor.m_ScaleH);
        detectPostProcess.Float("w_scale", descriptor.m_ScaleW);
        detectPostProcess.Float("x_scale", descriptor.m_ScaleX);
        detectPostProcess.Float("y_scale", descriptor.m_ScaleY);
    });
    detectPostProcess.Finish();

    // Create JSON string
    std::stringstream strStream;
    std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
    std::copy(buffer.begin(), buffer.end(), std::ostream_iterator<int>(strStream, ","));

    return strStream.str();
}
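A hedged usage sketch: populate a descriptor, then splice the generated comma-separated byte list into the "custom_options" array of a DetectionPostProcess operator inside the model JSON. The values are illustrative:

armnn::DetectionPostProcessDescriptor descriptor;
descriptor.m_UseRegularNms          = true;
descriptor.m_MaxDetections          = 3;
descriptor.m_MaxClassesPerDetection = 1;
descriptor.m_DetectionsPerClass     = 1;
descriptor.m_NumClasses             = 2;
descriptor.m_NmsScoreThreshold      = 0.0f;
descriptor.m_NmsIouThreshold        = 0.5f;
descriptor.m_ScaleH                 = 5.0f;
descriptor.m_ScaleW                 = 5.0f;
descriptor.m_ScaleX                 = 10.0f;
descriptor.m_ScaleY                 = 10.0f;

std::string customOptions = GenerateDetectionPostProcessJsonString(descriptor);
// m_JsonString = R"(... "custom_options": [)" + customOptions + R"(] ...)";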

◆ loadNetwork()

void loadNetwork ( armnn::NetworkId  networkId,
bool  loadDynamic 
)
inline

Definition at line 121 of file ParserFlatbuffersFixture.hpp.

References armnn::CpuRef, MakeModelDynamic(), armnn::Optimize(), ReadStringToBinary(), and armnn::Success.

Referenced by Setup().

{
    bool ok = ReadStringToBinary();
    if (!ok) {
        throw armnn::Exception("LoadNetwork failed while reading binary input");
    }

    armnn::INetworkPtr network = loadDynamic ? m_Parser->LoadModel(MakeModelDynamic(m_GraphBinary))
                                             : m_Parser->CreateNetworkFromBinary(m_GraphBinary);

    if (!network) {
        throw armnn::Exception("The parser failed to create an ArmNN network");
    }

    auto optimized = Optimize(*network, { armnn::Compute::CpuRef },
                              m_Runtime->GetDeviceSpec());
    std::string errorMessage;

    armnn::Status ret = m_Runtime->LoadNetwork(networkId, std::move(optimized), errorMessage);

    if (ret != armnn::Status::Success)
    {
        throw armnn::Exception(
            fmt::format("The runtime failed to load the network. "
                        "Error was: {}. in {} [{}:{}]",
                        errorMessage,
                        __func__,
                        __FILE__,
                        __LINE__));
    }
}

◆ MakeModelDynamic()

std::unique_ptr<tflite::ModelT> MakeModelDynamic ( std::vector< uint8_t >  graphBinary)
inline

Definition at line 75 of file ParserFlatbuffersFixture.hpp.

References CHECK_LOCATION.

Referenced by loadNetwork().

{
    const uint8_t* binaryContent = graphBinary.data();
    const size_t len = graphBinary.size();
    if (binaryContent == nullptr)
    {
        throw armnn::InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                                          CHECK_LOCATION().AsString()));
    }
    flatbuffers::Verifier verifier(binaryContent, len);
    if (verifier.VerifyBuffer<tflite::Model>() == false)
    {
        throw armnn::ParseException(fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
                                                "flatbuffers format. size:{} {}",
                                                len,
                                                CHECK_LOCATION().AsString()));
    }
    auto model = tflite::UnPackModel(binaryContent);

    for (auto const& subgraph : model->subgraphs)
    {
        std::vector<int32_t> inputIds = subgraph->inputs;
        for (unsigned int tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
        {
            if (std::find(inputIds.begin(), inputIds.end(), tensorIndex) != inputIds.end())
            {
                continue;
            }
            for (auto const& tensor : subgraph->tensors)
            {
                if (tensor->shape_signature.size() != 0)
                {
                    continue;
                }

                for (unsigned int i = 0; i < tensor->shape.size(); ++i)
                {
                    tensor->shape_signature.push_back(-1);
                }
            }
        }
    }

    return model;
}
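A hedged sketch of the observable effect, as exercised by loadNetwork(): after the call, every non-input tensor that previously had no shape_signature carries one -1 entry per dimension, which the parser treats as fully dynamic when m_InferAndValidate is set:

std::unique_ptr<tflite::ModelT> model = MakeModelDynamic(m_GraphBinary);
// A non-input tensor whose static shape is {1, 2, 2, 1} now has a
// shape_signature of {-1, -1, -1, -1}.
armnn::INetworkPtr network = m_Parser->LoadModel(std::move(model));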

◆ ReadStringToBinary()

bool ReadStringToBinary ( )
inline

Definition at line 161 of file ParserFlatbuffersFixture.hpp.

References ARMNN_ASSERT_MSG, g_TfLiteSchemaText, g_TfLiteSchemaText_len, and RunTest().

Referenced by loadNetwork(), and TEST_SUITE().

{
    std::string schemafile(&g_TfLiteSchemaText[0], &g_TfLiteSchemaText[g_TfLiteSchemaText_len]);

    // parse schema first, so we can use it to parse the data after
    flatbuffers::Parser parser;

    bool ok = parser.Parse(schemafile.c_str());
    ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");

    ok &= parser.Parse(m_JsonString.c_str());
    ARMNN_ASSERT_MSG(ok, "Failed to parse json input");

    if (!ok)
    {
        return false;
    }

    {
        const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
        size_t size = static_cast<size_t>(parser.builder_.GetSize());
        m_GraphBinary.assign(bufferPtr, bufferPtr + size);
    }
    return ok;
}
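A hedged sketch of the call sequence, normally driven by Setup() via loadNetwork(); the model JSON is elided:

m_JsonString = R"({ "version": 3, ... })";  // hypothetical model description
bool ok = ReadStringToBinary();
CHECK(ok);
// m_GraphBinary now holds the model serialized as a TfLite flatbuffer,
// ready for m_Parser->CreateNetworkFromBinary() or MakeModelDynamic().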

◆ RunTest() [1/5]

void RunTest ( size_t  subgraphId,
const std::vector< armnn::ResolveType< armnnType >> &  inputData,
const std::vector< armnn::ResolveType< armnnType >> &  expectedOutputData 
)

Single Input, Single Output. Executes the network with the given input tensor and checks the result against the given output tensor.

This overload assumes the network has a single input and a single output.

Definition at line 317 of file ParserFlatbuffersFixture.hpp.

References m_SingleInputName, and m_SingleOutputName.

Referenced by ReadStringToBinary().

{
    RunTest<NumOutputDimensions, armnnType>(subgraphId,
                                            { { m_SingleInputName, inputData } },
                                            { { m_SingleOutputName, expectedOutputData } });
}

◆ RunTest() [2/5]

void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType >>> &  expectedOutputData 
)

Multiple Inputs, Multiple Outputs. Executes the network with the given input tensors and checks the results against the given output tensors.

This overload supports multiple inputs and multiple outputs, identified by name.

Definition at line 331 of file ParserFlatbuffersFixture.hpp.

{
    RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
}

◆ RunTest() [3/5]

void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType1 >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType2 >>> &  expectedOutputData,
bool  isDynamic = false 
)

Multiple Inputs, Multiple Outputs with variable DataTypes and different dimension sizes.

Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name, and allows the input data type to differ from the output data type.

Definition at line 345 of file ParserFlatbuffersFixture.hpp.

References armnn::Boolean, CompareTensors(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), m_DynamicNetworkIdentifier, m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
            fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                        NumOutputDimensions,
                        outputNumDimensions,
                        it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Set flag so that the correct comparison function is called if the output is boolean.
    bool isBoolean = (armnnType2 == armnn::DataType::Boolean);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                     isBoolean, isDynamic);
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }

    if (isDynamic)
    {
        m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors);

        // Compare each output tensor to the expected values.
        for (auto&& it : expectedOutputData)
        {
            armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
            auto outputExpected = it.second;
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                         false, isDynamic);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }
}

◆ RunTest() [4/5]

void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< inputType1 >>> &  input1Data,
const std::map< std::string, std::vector< armnn::ResolveType< inputType2 >>> &  input2Data,
const std::map< std::string, std::vector< armnn::ResolveType< outputType >>> &  expectedOutputData 
)

Multiple Inputs with different DataTypes, Multiple Outputs with variable DataTypes. Executes the network with the given input tensors and checks the results against the given output tensors.

This overload supports multiple inputs and multiple outputs, identified by name, and allows the input data types to differ from the output data type.

Definition at line 465 of file ParserFlatbuffersFixture.hpp.

References armnn::Boolean, CompareTensors(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

{
    using DataType2 = armnn::ResolveType<outputType>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
    FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
            fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                        NumOutputDimensions,
                        outputNumDimensions,
                        it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Set flag so that the correct comparison function is called if the output is boolean.
    bool isBoolean = (outputType == armnn::DataType::Boolean);

    // Compare each output tensor to the expected values.
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                     bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                     isBoolean);
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }
}

◆ RunTest() [5/5]

void RunTest ( std::size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType1 >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType2 >>> &  expectedOutputData 
)

Multiple Inputs, Multiple Outputs with variable DataTypes and different dimension sizes.

Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name, and allows the input data type to differ from the output data type.

Definition at line 417 of file ParserFlatbuffersFixture.hpp.

References m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    armnn::OutputTensors outputTensors;
    outputTensors.reserve(expectedOutputData.size());
    std::map<std::string, std::vector<DataType2>> outputStorage;
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);

        std::vector<DataType2> out(it.second.size());
        outputStorage.emplace(it.first, out);
        outputTensors.push_back({ bindingInfo.first,
                                  armnn::Tensor(bindingInfo.second,
                                                outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Checks the results.
    for (auto&& it : expectedOutputData)
    {
        std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
        for (unsigned int i = 0; i < out.size(); ++i)
        {
            CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
        }
    }
}

◆ Setup()

void Setup ( bool  testDynamic = true)
inline

Definition at line 64 of file ParserFlatbuffersFixture.hpp.

References loadNetwork().

Referenced by SetupSingleInputSingleOutput(), and TEST_SUITE().

{
    m_TestDynamic = testDynamic;
    loadNetwork(m_NetworkIdentifier, false);

    if (m_TestDynamic)
    {
        loadNetwork(m_DynamicNetworkIdentifier, true);
    }
}

◆ SetupSingleInputSingleOutput()

void SetupSingleInputSingleOutput ( const std::string &  inputName,
const std::string &  outputName 
)
inline

Definition at line 153 of file ParserFlatbuffersFixture.hpp.

References Setup().

Referenced by TEST_SUITE().

{
    // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
    m_SingleInputName = inputName;
    m_SingleOutputName = outputName;
    Setup();
}

Member Data Documentation

◆ m_DynamicNetworkIdentifier

armnn::NetworkId m_DynamicNetworkIdentifier

Definition at line 55 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_GraphBinary

std::vector<uint8_t> m_GraphBinary

Definition at line 51 of file ParserFlatbuffersFixture.hpp.

◆ m_JsonString

std::string m_JsonString

Definition at line 52 of file ParserFlatbuffersFixture.hpp.

◆ m_NetworkIdentifier

armnn::NetworkId m_NetworkIdentifier

Definition at line 54 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_Parser

std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl> m_Parser

Definition at line 57 of file ParserFlatbuffersFixture.hpp.

Referenced by CheckTensors(), ParserFlatbuffersFixture(), and RunTest().

◆ m_Runtime

armnn::IRuntimePtr m_Runtime

Definition at line 53 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_SingleInputName

std::string m_SingleInputName

If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest().

Definition at line 61 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_SingleOutputName

std::string m_SingleOutputName

Definition at line 62 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_TestDynamic

bool m_TestDynamic

Definition at line 56 of file ParserFlatbuffersFixture.hpp.


The documentation for this struct was generated from the following file:

ParserFlatbuffersFixture.hpp