ArmNN
 22.05
ParserFlatbuffersFixture Struct Reference

#include <ParserFlatbuffersFixture.hpp>

Public Member Functions

 ParserFlatbuffersFixture ()
 
void Setup (bool testDynamic=true)
 
std::unique_ptr< tflite::ModelT > MakeModelDynamic (std::vector< uint8_t > graphBinary)
 
void loadNetwork (armnn::NetworkId networkId, bool loadDynamic)
 
void SetupSingleInputSingleOutput (const std::string &inputName, const std::string &outputName)
 
bool ReadStringToBinary ()
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType>
void RunTest (size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
 Executes the network with the given input tensor and checks the result against the given output tensor. More...
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType >>> &expectedOutputData)
 Executes the network with the given input tensors and checks the results against the given output tensors. More...
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType1, armnn::DataType ArmnnType2>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType1 >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType2 >>> &expectedOutputData, bool isDynamic=false)
 Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes. More...
 
template<std::size_t NumOutputDimensions, armnn::DataType inputType1, armnn::DataType inputType2, armnn::DataType outputType>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< inputType1 >>> &input1Data, const std::map< std::string, std::vector< armnn::ResolveType< inputType2 >>> &input2Data, const std::map< std::string, std::vector< armnn::ResolveType< outputType >>> &expectedOutputData)
 Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes Executes the network with the given input tensors and checks the results against the given output tensors. More...
 
template<armnn::DataType ArmnnType1, armnn::DataType ArmnnType2>
void RunTest (std::size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType1 >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType2 >>> &expectedOutputData)
 Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes. More...
 
void CheckTensors (const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
 

Static Public Member Functions

static std::string GenerateDetectionPostProcessJsonString (const armnn::DetectionPostProcessDescriptor &descriptor)
 

Public Attributes

std::vector< uint8_t > m_GraphBinary
 
std::string m_JsonString
 
armnn::IRuntimePtr m_Runtime
 
armnn::NetworkId m_NetworkIdentifier
 
armnn::NetworkId m_DynamicNetworkIdentifier
 
bool m_TestDynamic
 
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
 
std::string m_SingleInputName
 If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest(). More...
 
std::string m_SingleOutputName
 

Detailed Description

Definition at line 35 of file ParserFlatbuffersFixture.hpp.

Constructor & Destructor Documentation

◆ ParserFlatbuffersFixture()

Definition at line 37 of file ParserFlatbuffersFixture.hpp.

References m_Parser.

37  :
41  {
42  ITfLiteParser::TfLiteParserOptions options;
43  options.m_StandInLayerForUnsupported = true;
44  options.m_InferAndValidate = true;
45 
46  m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
48  }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:49
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser

Member Function Documentation

◆ CheckTensors()

void CheckTensors ( const TensorRawPtr &  tensors,
size_t  shapeSize,
const std::vector< int32_t > &  shape,
tflite::TensorType  tensorType,
uint32_t  buffer,
const std::string &  name,
const std::vector< float > &  min,
const std::vector< float > &  max,
const std::vector< float > &  scale,
const std::vector< int64_t > &  zeroPoint 
)
inline

Definition at line 259 of file ParserFlatbuffersFixture.hpp.

References m_Parser, and armnn::VerifyTensorInfoDataType().

263  {
264  CHECK(tensors);
265  CHECK_EQ(shapeSize, tensors->shape.size());
266  CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
267  CHECK_EQ(tensorType, tensors->type);
268  CHECK_EQ(buffer, tensors->buffer);
269  CHECK_EQ(name, tensors->name);
270  CHECK(tensors->quantization);
271  CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
272  tensors->quantization.get()->min.end()));
273  CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
274  tensors->quantization.get()->max.end()));
275  CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
276  tensors->quantization.get()->scale.end()));
277  CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
278  tensors->quantization.get()->zero_point.begin(),
279  tensors->quantization.get()->zero_point.end()));
280  }

◆ GenerateDetectionPostProcessJsonString()

static std::string GenerateDetectionPostProcessJsonString ( const armnn::DetectionPostProcessDescriptor &  descriptor)
inlinestatic

Definition at line 232 of file ParserFlatbuffersFixture.hpp.

References DetectionPostProcessDescriptor::m_DetectionsPerClass, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, and DetectionPostProcessDescriptor::m_UseRegularNms.

234  {
235  flexbuffers::Builder detectPostProcess;
236  detectPostProcess.Map([&]() {
237  detectPostProcess.Bool("use_regular_nms", descriptor.m_UseRegularNms);
238  detectPostProcess.Int("max_detections", descriptor.m_MaxDetections);
239  detectPostProcess.Int("max_classes_per_detection", descriptor.m_MaxClassesPerDetection);
240  detectPostProcess.Int("detections_per_class", descriptor.m_DetectionsPerClass);
241  detectPostProcess.Int("num_classes", descriptor.m_NumClasses);
242  detectPostProcess.Float("nms_score_threshold", descriptor.m_NmsScoreThreshold);
243  detectPostProcess.Float("nms_iou_threshold", descriptor.m_NmsIouThreshold);
244  detectPostProcess.Float("h_scale", descriptor.m_ScaleH);
245  detectPostProcess.Float("w_scale", descriptor.m_ScaleW);
246  detectPostProcess.Float("x_scale", descriptor.m_ScaleX);
247  detectPostProcess.Float("y_scale", descriptor.m_ScaleY);
248  });
249  detectPostProcess.Finish();
250 
251  // Create JSON string
252  std::stringstream strStream;
253  std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
254  std::copy(buffer.begin(), buffer.end(),std::ostream_iterator<int>(strStream,","));
255 
256  return strStream.str();
257  }
float m_ScaleW
Center size encoding scale weight.
float m_ScaleX
Center size encoding scale x.
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
uint32_t m_MaxDetections
Maximum numbers of detections.
float m_NmsIouThreshold
Intersection over union threshold.
uint32_t m_NumClasses
Number of classes.
bool m_UseRegularNms
Use Regular NMS.
float m_ScaleH
Center size encoding scale height.
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.

◆ loadNetwork()

void loadNetwork ( armnn::NetworkId  networkId,
bool  loadDynamic 
)
inline

Definition at line 120 of file ParserFlatbuffersFixture.hpp.

References armnn::CpuRef, MakeModelDynamic(), armnn::Optimize(), ReadStringToBinary(), and armnn::Success.

Referenced by Setup().

121  {
122  if (!ReadStringToBinary())
123  {
124  throw armnn::Exception("LoadNetwork failed while reading binary input");
125  }
126 
127  armnn::INetworkPtr network = loadDynamic ? m_Parser->LoadModel(MakeModelDynamic(m_GraphBinary))
128  : m_Parser->CreateNetworkFromBinary(m_GraphBinary);
129 
130  if (!network) {
131  throw armnn::Exception("The parser failed to create an ArmNN network");
132  }
133 
134  auto optimized = Optimize(*network, { armnn::Compute::CpuRef },
135  m_Runtime->GetDeviceSpec());
136  std::string errorMessage;
137 
138  armnn::Status ret = m_Runtime->LoadNetwork(networkId, move(optimized), errorMessage);
139 
140  if (ret != armnn::Status::Success)
141  {
142  throw armnn::Exception(
143  fmt::format("The runtime failed to load the network. "
144  "Error was: {}. in {} [{}:{}]",
145  errorMessage,
146  __func__,
147  __FILE__,
148  __LINE__));
149  }
150  }
CPU Execution: Reference C++ kernels.
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1847
Status
enumeration
Definition: Types.hpp:42
std::vector< uint8_t > m_GraphBinary
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:241
std::unique_ptr< tflite::ModelT > MakeModelDynamic(std::vector< uint8_t > graphBinary)

◆ MakeModelDynamic()

std::unique_ptr<tflite::ModelT> MakeModelDynamic ( std::vector< uint8_t >  graphBinary)
inline

Definition at line 74 of file ParserFlatbuffersFixture.hpp.

References CHECK_LOCATION.

Referenced by loadNetwork().

75  {
76  const uint8_t* binaryContent = graphBinary.data();
77  const size_t len = graphBinary.size();
78  if (binaryContent == nullptr)
79  {
80  throw armnn::InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
81  CHECK_LOCATION().AsString()));
82  }
83  flatbuffers::Verifier verifier(binaryContent, len);
84  if (verifier.VerifyBuffer<tflite::Model>() == false)
85  {
86  throw armnn::ParseException(fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
87  "flatbuffers format. size:{} {}",
88  len,
89  CHECK_LOCATION().AsString()));
90  }
91  auto model = tflite::UnPackModel(binaryContent);
92 
93  for (auto const& subgraph : model->subgraphs)
94  {
95  std::vector<int32_t> inputIds = subgraph->inputs;
96  for (unsigned int tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
97  {
98  if (std::find(inputIds.begin(), inputIds.end(), tensorIndex) != inputIds.end())
99  {
100  continue;
101  }
102  for (auto const& tensor : subgraph->tensors)
103  {
104  if (tensor->shape_signature.size() != 0)
105  {
106  continue;
107  }
108 
109  for (unsigned int i = 0; i < tensor->shape.size(); ++i)
110  {
111  tensor->shape_signature.push_back(-1);
112  }
113  }
114  }
115  }
116 
117  return model;
118  }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203

◆ ReadStringToBinary()

bool ReadStringToBinary ( )
inline

Definition at line 160 of file ParserFlatbuffersFixture.hpp.

References g_TfLiteSchemaText, g_TfLiteSchemaText_len, and RunTest().

Referenced by loadNetwork(), and TEST_SUITE().

161  {
163 
164  // parse schema first, so we can use it to parse the data after
165  flatbuffers::Parser parser;
166 
167  bool ok = parser.Parse(schemafile.c_str());
168  CHECK_MESSAGE(ok, std::string("Failed to parse schema file. Error was: " + parser.error_).c_str());
169 
170  ok = parser.Parse(m_JsonString.c_str());
171  CHECK_MESSAGE(ok, std::string("Failed to parse json input. Error was: " + parser.error_).c_str());
172 
173  {
174  const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
175  size_t size = static_cast<size_t>(parser.builder_.GetSize());
176  m_GraphBinary.assign(bufferPtr, bufferPtr+size);
177  }
178  return ok;
179  }
std::vector< uint8_t > m_GraphBinary
unsigned char g_TfLiteSchemaText[]
unsigned int g_TfLiteSchemaText_len

◆ RunTest() [1/5]

void RunTest ( size_t  subgraphId,
const std::vector< armnn::ResolveType< armnnType >> &  inputData,
const std::vector< armnn::ResolveType< armnnType >> &  expectedOutputData 
)

Executes the network with the given input tensor and checks the result against the given output tensor.

Single Input, Single Output Executes the network with the given input tensor and checks the result against the given output tensor.

This assumes the network has a single input and a single output.

This overload assumes the network has a single input and a single output.

Definition at line 311 of file ParserFlatbuffersFixture.hpp.

References m_SingleInputName, and m_SingleOutputName.

Referenced by ReadStringToBinary().

314 {
315  RunTest<NumOutputDimensions, armnnType>(subgraphId,
316  { { m_SingleInputName, inputData } },
317  { { m_SingleOutputName, expectedOutputData } });
318 }
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and outpu...

◆ RunTest() [2/5]

void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType >>> &  expectedOutputData 
)

Executes the network with the given input tensors and checks the results against the given output tensors.

Multiple Inputs, Multiple Outputs Executes the network with the given input tensors and checks the results against the given output tensors.

This overload supports multiple inputs and multiple outputs, identified by name.

Definition at line 325 of file ParserFlatbuffersFixture.hpp.

328 {
329  RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
330 }

◆ RunTest() [3/5]

void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType1 >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType2 >>> &  expectedOutputData,
bool  isDynamic = false 
)

Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.

Multiple Inputs, Multiple Outputs w/ Variable Datatypes Executes the network with the given input tensors and checks the results against the given output tensors.

Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for the input datatype to be different to the output

This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for the input datatype to be different to the output

Definition at line 339 of file ParserFlatbuffersFixture.hpp.

References armnn::Boolean, CompareTensors(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), m_DynamicNetworkIdentifier, m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

343 {
344  using DataType2 = armnn::ResolveType<armnnType2>;
345 
346  // Setup the armnn input tensors from the given vectors.
347  armnn::InputTensors inputTensors;
348  FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
349 
350  // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
351  std::map<std::string, std::vector<DataType2>> outputStorage;
352  armnn::OutputTensors outputTensors;
353  for (auto&& it : expectedOutputData)
354  {
355  armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
356  armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);
357 
358  // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
359  auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
360  CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
361  fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
362  NumOutputDimensions,
363  outputNumDimensions,
364  it.first));
365 
366  armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
367  outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
368  outputTensors.push_back(
369  { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
370  }
371 
372  m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
373 
374  // Set flag so that the correct comparison function is called if the output is boolean.
375  bool isBoolean = armnnType2 == armnn::DataType::Boolean ? true : false;
376 
377  // Compare each output tensor to the expected values
378  for (auto&& it : expectedOutputData)
379  {
380  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
381  auto outputExpected = it.second;
382  auto result = CompareTensors(outputExpected, outputStorage[it.first],
383  bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
384  isBoolean, isDynamic);
385  CHECK_MESSAGE(result.m_Result, result.m_Message.str());
386  }
387 
388  if (isDynamic)
389  {
390  m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors);
391 
392  // Compare each output tensor to the expected values
393  for (auto&& it : expectedOutputData)
394  {
395  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
396  auto outputExpected = it.second;
397  auto result = CompareTensors(outputExpected, outputStorage[it.first],
398  bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
399  false, isDynamic);
400  CHECK_MESSAGE(result.m_Result, result.m_Message.str());
401  }
402  }
403 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:392
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:290
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:319
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:393
armnn::PredicateResult CompareTensors(const std::vector< T > &actualData, const std::vector< T > &expectedData, const armnn::TensorShape &actualShape, const armnn::TensorShape &expectedShape, bool compareBoolean=false, bool isDynamic=false)
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
Definition: Tensor.hpp:274
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
Definition: TypesUtils.hpp:337
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
unsigned int GetNumElements() const
Definition: Tensor.hpp:196

◆ RunTest() [4/5]

void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< inputType1 >>> &  input1Data,
const std::map< std::string, std::vector< armnn::ResolveType< inputType2 >>> &  input2Data,
const std::map< std::string, std::vector< armnn::ResolveType< outputType >>> &  expectedOutputData 
)

Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes Executes the network with the given input tensors and checks the results against the given output tensors.

This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for the input datatype to be different to the output

Definition at line 459 of file ParserFlatbuffersFixture.hpp.

References armnn::Boolean, CompareTensors(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

463 {
464  using DataType2 = armnn::ResolveType<outputType>;
465 
466  // Setup the armnn input tensors from the given vectors.
467  armnn::InputTensors inputTensors;
468  FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
469  FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);
470 
471  // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
472  std::map<std::string, std::vector<DataType2>> outputStorage;
473  armnn::OutputTensors outputTensors;
474  for (auto&& it : expectedOutputData)
475  {
476  armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
477  armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);
478 
479  // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
480  auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
481  CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
482  fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
483  NumOutputDimensions,
484  outputNumDimensions,
485  it.first));
486 
487  armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType);
488  outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
489  outputTensors.push_back(
490  { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
491  }
492 
493  m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
494 
495  // Set flag so that the correct comparison function is called if the output is boolean.
496  bool isBoolean = outputType == armnn::DataType::Boolean ? true : false;
497 
498  // Compare each output tensor to the expected values
499  for (auto&& it : expectedOutputData)
500  {
501  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
502  auto outputExpected = it.second;
503  auto result = CompareTensors(outputExpected, outputStorage[it.first],
504  bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
505  isBoolean);
506  CHECK_MESSAGE(result.m_Result, result.m_Message.str());
507  }
508 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:392
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:290
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:319
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:393
armnn::PredicateResult CompareTensors(const std::vector< T > &actualData, const std::vector< T > &expectedData, const armnn::TensorShape &actualShape, const armnn::TensorShape &expectedShape, bool compareBoolean=false, bool isDynamic=false)
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
Definition: Tensor.hpp:274
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
Definition: TypesUtils.hpp:337
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
unsigned int GetNumElements() const
Definition: Tensor.hpp:196

◆ RunTest() [5/5]

void RunTest ( std::size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType1 >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType2 >>> &  expectedOutputData 
)

Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.

Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for the input datatype to be different to the output

Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for the input datatype to be different to the output.

Definition at line 411 of file ParserFlatbuffersFixture.hpp.

References m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

414 {
415  using DataType2 = armnn::ResolveType<armnnType2>;
416 
417  // Setup the armnn input tensors from the given vectors.
418  armnn::InputTensors inputTensors;
419  FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
420 
421  armnn::OutputTensors outputTensors;
422  outputTensors.reserve(expectedOutputData.size());
423  std::map<std::string, std::vector<DataType2>> outputStorage;
424  for (auto&& it : expectedOutputData)
425  {
426  armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
427  armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);
428 
429  std::vector<DataType2> out(it.second.size());
430  outputStorage.emplace(it.first, out);
431  outputTensors.push_back({ bindingInfo.first,
432  armnn::Tensor(bindingInfo.second,
433  outputStorage.at(it.first).data()) });
434  }
435 
436  m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
437 
438  // Checks the results.
439  for (auto&& it : expectedOutputData)
440  {
441  std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
442  {
443  for (unsigned int i = 0; i < out.size(); ++i)
444  {
445  CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
446  }
447  }
448  }
449 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:392
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:319
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:393
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
Definition: Tensor.hpp:274
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
Definition: TypesUtils.hpp:337

◆ Setup()

void Setup ( bool  testDynamic = true)
inline

Definition at line 63 of file ParserFlatbuffersFixture.hpp.

References loadNetwork().

Referenced by SetupSingleInputSingleOutput(), and TEST_SUITE().

64  {
65  m_TestDynamic = testDynamic;
67 
68  if (m_TestDynamic)
69  {
71  }
72  }
void loadNetwork(armnn::NetworkId networkId, bool loadDynamic)

◆ SetupSingleInputSingleOutput()

void SetupSingleInputSingleOutput ( const std::string &  inputName,
const std::string &  outputName 
)
inline

Definition at line 152 of file ParserFlatbuffersFixture.hpp.

References Setup().

Referenced by TEST_SUITE().

153  {
154  // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
155  m_SingleInputName = inputName;
156  m_SingleOutputName = outputName;
157  Setup();
158  }
void Setup(bool testDynamic=true)
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and outpu...

Member Data Documentation

◆ m_DynamicNetworkIdentifier

armnn::NetworkId m_DynamicNetworkIdentifier

Definition at line 54 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_GraphBinary

std::vector<uint8_t> m_GraphBinary

Definition at line 50 of file ParserFlatbuffersFixture.hpp.

◆ m_JsonString

std::string m_JsonString

Definition at line 51 of file ParserFlatbuffersFixture.hpp.

◆ m_NetworkIdentifier

armnn::NetworkId m_NetworkIdentifier

Definition at line 53 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_Parser

std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl> m_Parser

Definition at line 56 of file ParserFlatbuffersFixture.hpp.

Referenced by CheckTensors(), ParserFlatbuffersFixture(), and RunTest().

◆ m_Runtime

armnn::IRuntimePtr m_Runtime

Definition at line 52 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_SingleInputName

std::string m_SingleInputName

If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest().

Definition at line 60 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_SingleOutputName

std::string m_SingleOutputName

Definition at line 61 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_TestDynamic

bool m_TestDynamic

Definition at line 55 of file ParserFlatbuffersFixture.hpp.


The documentation for this struct was generated from the following file: ParserFlatbuffersFixture.hpp