ArmNN 21.08
ParserFlatbuffersFixture Struct Reference

#include <ParserFlatbuffersFixture.hpp>

Public Member Functions

 ParserFlatbuffersFixture ()
 
void Setup (bool testDynamic=true)
 
std::unique_ptr< tflite::ModelT > MakeModelDynamic (std::vector< uint8_t > graphBinary)
 
void loadNetwork (armnn::NetworkId networkId, bool loadDynamic)
 
void SetupSingleInputSingleOutput (const std::string &inputName, const std::string &outputName)
 
bool ReadStringToBinary ()
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType>
void RunTest (size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
 Executes the network with the given input tensor and checks the result against the given output tensor. More...
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType >>> &expectedOutputData)
 Executes the network with the given input tensors and checks the results against the given output tensors. More...
 
template<std::size_t NumOutputDimensions, armnn::DataType ArmnnType1, armnn::DataType ArmnnType2>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType1 >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType2 >>> &expectedOutputData, bool isDynamic=false)
 Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes. More...
 
template<std::size_t NumOutputDimensions, armnn::DataType inputType1, armnn::DataType inputType2, armnn::DataType outputType>
void RunTest (size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< inputType1 >>> &input1Data, const std::map< std::string, std::vector< armnn::ResolveType< inputType2 >>> &input2Data, const std::map< std::string, std::vector< armnn::ResolveType< outputType >>> &expectedOutputData)
 Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes. Executes the network with the given input tensors and checks the results against the given output tensors. More...
 
template<armnn::DataType ArmnnType1, armnn::DataType ArmnnType2>
void RunTest (std::size_t subgraphId, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType1 >>> &inputData, const std::map< std::string, std::vector< armnn::ResolveType< ArmnnType2 >>> &expectedOutputData)
 Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes. More...
 
void CheckTensors (const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
 

Static Public Member Functions

static std::string GenerateDetectionPostProcessJsonString (const armnn::DetectionPostProcessDescriptor &descriptor)
 

Public Attributes

std::vector< uint8_t > m_GraphBinary
 
std::string m_JsonString
 
armnn::IRuntimePtr m_Runtime
 
armnn::NetworkId m_NetworkIdentifier
 
armnn::NetworkId m_DynamicNetworkIdentifier
 
bool m_TestDynamic
 
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
 
std::string m_SingleInputName
 If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest(). More...
 
std::string m_SingleOutputName
 

Detailed Description

Definition at line 36 of file ParserFlatbuffersFixture.hpp.

Constructor & Destructor Documentation

◆ ParserFlatbuffersFixture()

Definition at line 38 of file ParserFlatbuffersFixture.hpp.

References m_Parser.

ParserFlatbuffersFixture() :
        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
        m_NetworkIdentifier(0),
        m_DynamicNetworkIdentifier(1)
{
    ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = true;
    options.m_InferAndValidate = true;

    m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
        armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options));
}
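Typical usage, as a minimal sketch (the derived fixture, model JSON, tensor names, and data values below are hypothetical): a test fixture derives from ParserFlatbuffersFixture, assigns a flatbuffers-JSON model to m_JsonString, calls SetupSingleInputSingleOutput() (or Setup()), and then invokes one of the RunTest() overloads.

    // Minimal usage sketch; the model JSON, tensor names and values are hypothetical.
    struct HypotheticalFixture : public ParserFlatbuffersFixture
    {
        HypotheticalFixture()
        {
            m_JsonString = R"({ /* ... a TfLite model in flatbuffers JSON form ... */ })";
            SetupSingleInputSingleOutput("inputTensor", "outputTensor");
        }
    };

    TEST_CASE_FIXTURE(HypotheticalFixture, "ParseHypotheticalModel")
    {
        // 4-D Float32 output; expected values depend on the model above.
        RunTest<4, armnn::DataType::Float32>(0,
                                             { 1.0f, 2.0f, 3.0f },
                                             { 2.0f, 4.0f, 6.0f });
    }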

Member Function Documentation

◆ CheckTensors()

void CheckTensors ( const TensorRawPtr &  tensors,
size_t  shapeSize,
const std::vector< int32_t > &  shape,
tflite::TensorType  tensorType,
uint32_t  buffer,
const std::string &  name,
const std::vector< float > &  min,
const std::vector< float > &  max,
const std::vector< float > &  scale,
const std::vector< int64_t > &  zeroPoint 
)
inline

Definition at line 265 of file ParserFlatbuffersFixture.hpp.

References m_Parser, and armnn::VerifyTensorInfoDataType().

{
    CHECK(tensors);
    CHECK_EQ(shapeSize, tensors->shape.size());
    CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
    CHECK_EQ(tensorType, tensors->type);
    CHECK_EQ(buffer, tensors->buffer);
    CHECK_EQ(name, tensors->name);
    CHECK(tensors->quantization);
    CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
                     tensors->quantization.get()->min.end()));
    CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
                     tensors->quantization.get()->max.end()));
    CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
                     tensors->quantization.get()->scale.end()));
    CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
                     tensors->quantization.get()->zero_point.begin(),
                     tensors->quantization.get()->zero_point.end()));
}
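As an illustration, a test might verify one tensor of a parsed model as follows. The shape, buffer index, name, and quantization values here are assumptions, and tensors is presumed to have been obtained from the parsed subgraph beforehand:

    // Hypothetical values: a 1x2x2x1 UINT8 tensor named "inputTensor" backed by
    // buffer 1, with quantization min 0, max 255, scale 1 and zero point 0.
    CheckTensors(tensors, 4, { 1, 2, 2, 1 }, tflite::TensorType_UINT8,
                 1, "inputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });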

◆ GenerateDetectionPostProcessJsonString()

static std::string GenerateDetectionPostProcessJsonString ( const armnn::DetectionPostProcessDescriptor &  descriptor)
inlinestatic

Definition at line 238 of file ParserFlatbuffersFixture.hpp.

References DetectionPostProcessDescriptor::m_DetectionsPerClass, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, and DetectionPostProcessDescriptor::m_UseRegularNms.

{
    flexbuffers::Builder detectPostProcess;
    detectPostProcess.Map([&]() {
        detectPostProcess.Bool("use_regular_nms", descriptor.m_UseRegularNms);
        detectPostProcess.Int("max_detections", descriptor.m_MaxDetections);
        detectPostProcess.Int("max_classes_per_detection", descriptor.m_MaxClassesPerDetection);
        detectPostProcess.Int("detections_per_class", descriptor.m_DetectionsPerClass);
        detectPostProcess.Int("num_classes", descriptor.m_NumClasses);
        detectPostProcess.Float("nms_score_threshold", descriptor.m_NmsScoreThreshold);
        detectPostProcess.Float("nms_iou_threshold", descriptor.m_NmsIouThreshold);
        detectPostProcess.Float("h_scale", descriptor.m_ScaleH);
        detectPostProcess.Float("w_scale", descriptor.m_ScaleW);
        detectPostProcess.Float("x_scale", descriptor.m_ScaleX);
        detectPostProcess.Float("y_scale", descriptor.m_ScaleY);
    });
    detectPostProcess.Finish();

    // Create JSON string
    std::stringstream strStream;
    std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
    std::copy(buffer.begin(), buffer.end(), std::ostream_iterator<int>(strStream, ","));

    return strStream.str();
}
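A sketch of how a test might build the custom options for a DetectionPostProcess operator (all descriptor values below are hypothetical):

    armnn::DetectionPostProcessDescriptor descriptor;
    descriptor.m_MaxDetections          = 3;     // hypothetical values throughout
    descriptor.m_MaxClassesPerDetection = 1;
    descriptor.m_DetectionsPerClass     = 1;
    descriptor.m_NumClasses             = 2;
    descriptor.m_UseRegularNms          = false;
    descriptor.m_NmsScoreThreshold      = 0.0f;
    descriptor.m_NmsIouThreshold        = 0.5f;
    descriptor.m_ScaleH = 5.0f;
    descriptor.m_ScaleW = 5.0f;
    descriptor.m_ScaleX = 10.0f;
    descriptor.m_ScaleY = 10.0f;

    // Produces a comma-separated list of bytes suitable for splicing into the
    // operator's "custom_options" field inside m_JsonString.
    std::string customOptions =
        ParserFlatbuffersFixture::GenerateDetectionPostProcessJsonString(descriptor);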

◆ loadNetwork()

void loadNetwork ( armnn::NetworkId  networkId,
bool  loadDynamic 
)
inline

Definition at line 121 of file ParserFlatbuffersFixture.hpp.

References armnn::CpuRef, MakeModelDynamic(), armnn::Optimize(), ReadStringToBinary(), and armnn::Success.

Referenced by Setup().

{
    bool ok = ReadStringToBinary();
    if (!ok) {
        throw armnn::Exception("LoadNetwork failed while reading binary input");
    }

    armnn::INetworkPtr network = loadDynamic ? m_Parser->LoadModel(MakeModelDynamic(m_GraphBinary))
                                             : m_Parser->CreateNetworkFromBinary(m_GraphBinary);

    if (!network) {
        throw armnn::Exception("The parser failed to create an ArmNN network");
    }

    auto optimized = Optimize(*network, { armnn::Compute::CpuRef },
                              m_Runtime->GetDeviceSpec());
    std::string errorMessage;

    armnn::Status ret = m_Runtime->LoadNetwork(networkId, std::move(optimized), errorMessage);

    if (ret != armnn::Status::Success)
    {
        throw armnn::Exception(
            fmt::format("The runtime failed to load the network. "
                        "Error was: {}. in {} [{}:{}]",
                        errorMessage,
                        __func__,
                        __FILE__,
                        __LINE__));
    }
}

◆ MakeModelDynamic()

std::unique_ptr<tflite::ModelT> MakeModelDynamic ( std::vector< uint8_t >  graphBinary)
inline

Definition at line 75 of file ParserFlatbuffersFixture.hpp.

References CHECK_LOCATION.

Referenced by loadNetwork().

{
    const uint8_t* binaryContent = graphBinary.data();
    const size_t len = graphBinary.size();
    if (binaryContent == nullptr)
    {
        throw armnn::InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                                          CHECK_LOCATION().AsString()));
    }
    flatbuffers::Verifier verifier(binaryContent, len);
    if (verifier.VerifyBuffer<tflite::Model>() == false)
    {
        throw armnn::ParseException(fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
                                                "flatbuffers format. size:{} {}",
                                                len,
                                                CHECK_LOCATION().AsString()));
    }
    auto model = tflite::UnPackModel(binaryContent);

    for (auto const& subgraph : model->subgraphs)
    {
        std::vector<int32_t> inputIds = subgraph->inputs;
        for (unsigned int tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
        {
            if (std::find(inputIds.begin(), inputIds.end(), tensorIndex) != inputIds.end())
            {
                continue;
            }
            for (auto const& tensor : subgraph->tensors)
            {
                if (tensor->shape_signature.size() != 0)
                {
                    continue;
                }

                // Mark every dimension as dynamic (-1) so that shape inference
                // is exercised when the model is loaded.
                for (unsigned int i = 0; i < tensor->shape.size(); ++i)
                {
                    tensor->shape_signature.push_back(-1);
                }
            }
        }
    }

    return model;
}
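The net effect, sketched on a hypothetical tensor:

    // Illustration only. For a non-input tensor that had no shape_signature:
    //   before: shape = {1, 2, 2, 1}, shape_signature = {}
    //   after:  shape = {1, 2, 2, 1}, shape_signature = {-1, -1, -1, -1}
    // i.e. every dimension is advertised as dynamic, which is what loadNetwork(id, true)
    // relies on to test the parser's shape inference.
    auto dynamicModel = MakeModelDynamic(m_GraphBinary);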

◆ ReadStringToBinary()

bool ReadStringToBinary ( )
inline

Definition at line 161 of file ParserFlatbuffersFixture.hpp.

References ARMNN_ASSERT_MSG, g_TfLiteSchemaText, g_TfLiteSchemaText_len, and RunTest().

Referenced by loadNetwork(), and TEST_SUITE().

{
    std::string schemafile(g_TfLiteSchemaText, g_TfLiteSchemaText + g_TfLiteSchemaText_len);

    // parse schema first, so we can use it to parse the data after
    flatbuffers::Parser parser;

    bool ok = parser.Parse(schemafile.c_str());
    ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");

    ok &= parser.Parse(m_JsonString.c_str());
    ARMNN_ASSERT_MSG(ok, "Failed to parse json input");

    if (!ok)
    {
        return false;
    }

    {
        const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
        size_t size = static_cast<size_t>(parser.builder_.GetSize());
        m_GraphBinary.assign(bufferPtr, bufferPtr + size);
    }
    return ok;
}

◆ RunTest() [1/5]

template<std::size_t NumOutputDimensions, armnn::DataType armnnType>
void RunTest ( size_t  subgraphId,
const std::vector< armnn::ResolveType< armnnType >> &  inputData,
const std::vector< armnn::ResolveType< armnnType >> &  expectedOutputData 
)

Single Input, Single Output. Executes the network with the given input tensor and checks the result against the given output tensor. This overload assumes the network has a single input and a single output.

Definition at line 316 of file ParserFlatbuffersFixture.hpp.

References m_SingleInputName, and m_SingleOutputName.

Referenced by ReadStringToBinary().

{
    RunTest<NumOutputDimensions, armnnType>(subgraphId,
                                            { { m_SingleInputName, inputData } },
                                            { { m_SingleOutputName, expectedOutputData } });
}
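For example (hypothetical data, assuming a model with a 4-D Float32 output that doubles each input element):

    RunTest<4, armnn::DataType::Float32>(0,                            // subgraph id
                                         { 1.0f, 2.0f, 3.0f, 4.0f },   // input
                                         { 2.0f, 4.0f, 6.0f, 8.0f });  // expected output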

◆ RunTest() [2/5]

template<std::size_t NumOutputDimensions, armnn::DataType armnnType>
void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType >>> &  expectedOutputData 
)

Multiple Inputs, Multiple Outputs. Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name.

Definition at line 330 of file ParserFlatbuffersFixture.hpp.

{
    RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
}

◆ RunTest() [3/5]

template<std::size_t NumOutputDimensions, armnn::DataType armnnType1, armnn::DataType armnnType2>
void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType1 >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType2 >>> &  expectedOutputData,
bool  isDynamic = false 
)

Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes. Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports multiple inputs and multiple outputs, identified by name, and allows the input datatype to differ from the output datatype. If isDynamic is true, the same workload is also run on the dynamic network (loaded with inferred shapes) and the outputs are compared a second time.

Definition at line 344 of file ParserFlatbuffersFixture.hpp.

References CompareTensors(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), m_DynamicNetworkIdentifier, m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
                      fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                                  NumOutputDimensions,
                                  outputNumDimensions,
                                  it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Compare each output tensor to the expected values
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        if (std::is_same<DataType2, uint8_t>::value)
        {
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                         true, isDynamic);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
        else
        {
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                         false, isDynamic);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }

    if (isDynamic)
    {
        m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors);

        // Compare each output tensor to the expected values
        for (auto&& it : expectedOutputData)
        {
            armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
            auto outputExpected = it.second;
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                         false, isDynamic);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }
}
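For example (hypothetical names and values, assuming a dequantizing model with a 2-D output):

    RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::Float32>(
        0,
        { { "inputTensor",  { 0, 64, 128, 255 } } },          // quantized input
        { { "outputTensor", { 0.0f, 0.25f, 0.5f, 1.0f } } },  // dequantized output
        true);                                                // also run the dynamic network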

◆ RunTest() [4/5]

template<std::size_t NumOutputDimensions, armnn::DataType inputType1, armnn::DataType inputType2, armnn::DataType outputType>
void RunTest ( size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< inputType1 >>> &  input1Data,
const std::map< std::string, std::vector< armnn::ResolveType< inputType2 >>> &  input2Data,
const std::map< std::string, std::vector< armnn::ResolveType< outputType >>> &  expectedOutputData 
)

Multiple Inputs with different DataTypes, Multiple Outputs w/ Variable DataTypes. Executes the network with the given input tensors and checks the results against the given output tensors. This overload supports two groups of inputs, each with its own datatype, and multiple outputs, all identified by name; the input datatypes may differ from the output datatype.

Definition at line 471 of file ParserFlatbuffersFixture.hpp.

References CompareTensors(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

{
    using DataType2 = armnn::ResolveType<outputType>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
    FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);

    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
    std::map<std::string, std::vector<DataType2>> outputStorage;
    armnn::OutputTensors outputTensors;
    for (auto&& it : expectedOutputData)
    {
        armnn::LayerBindingId outputBindingId = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first).first;
        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkIdentifier, outputBindingId);

        // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
        auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
                      fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                                  NumOutputDimensions,
                                  outputNumDimensions,
                                  it.first));

        armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType);
        outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
        outputTensors.push_back(
            { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Compare each output tensor to the expected values
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        auto outputExpected = it.second;
        if (std::is_same<DataType2, uint8_t>::value)
        {
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape(), true);
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
        else
        {
            auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                         bindingInfo.second.GetShape(), bindingInfo.second.GetShape());
            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
        }
    }
}
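For example (hypothetical names and values, assuming a model that adds a Float32 tensor to a Signed32 tensor and emits a 1-D Float32 result):

    RunTest<1, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>(
        0,
        { { "inputA", { 1.0f, 2.0f } } },
        { { "inputB", { 3, 4 } } },
        { { "outputTensor", { 4.0f, 6.0f } } });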

◆ RunTest() [5/5]

template<armnn::DataType armnnType1, armnn::DataType armnnType2>
void RunTest ( std::size_t  subgraphId,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType1 >>> &  inputData,
const std::map< std::string, std::vector< armnn::ResolveType< armnnType2 >>> &  expectedOutputData 
)

Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes. Executes the network with the given input tensors and checks each result element-wise against the given output tensors (to within an epsilon of 1e-6). This overload supports multiple inputs and multiple outputs, identified by name, and allows the input datatype to differ from the output datatype.

Definition at line 423 of file ParserFlatbuffersFixture.hpp.

References m_NetworkIdentifier, m_Parser, m_Runtime, and armnn::VerifyTensorInfoDataType().

{
    using DataType2 = armnn::ResolveType<armnnType2>;

    // Setup the armnn input tensors from the given vectors.
    armnn::InputTensors inputTensors;
    FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);

    armnn::OutputTensors outputTensors;
    outputTensors.reserve(expectedOutputData.size());
    std::map<std::string, std::vector<DataType2>> outputStorage;
    for (auto&& it : expectedOutputData)
    {
        armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
        armnn::VerifyTensorInfoDataType(bindingInfo.second, armnnType2);

        std::vector<DataType2> out(it.second.size());
        outputStorage.emplace(it.first, out);
        outputTensors.push_back({ bindingInfo.first,
                                  armnn::Tensor(bindingInfo.second,
                                                outputStorage.at(it.first).data()) });
    }

    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);

    // Checks the results.
    for (auto&& it : expectedOutputData)
    {
        std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
        for (unsigned int i = 0; i < out.size(); ++i)
        {
            CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
        }
    }
}

◆ Setup()

void Setup ( bool  testDynamic = true)
inline

Definition at line 64 of file ParserFlatbuffersFixture.hpp.

References loadNetwork().

Referenced by SetupSingleInputSingleOutput(), and TEST_SUITE().

{
    m_TestDynamic = testDynamic;
    loadNetwork(m_NetworkIdentifier, false);

    if (m_TestDynamic)
    {
        loadNetwork(m_DynamicNetworkIdentifier, true);
    }
}

◆ SetupSingleInputSingleOutput()

void SetupSingleInputSingleOutput ( const std::string &  inputName,
const std::string &  outputName 
)
inline

Definition at line 153 of file ParserFlatbuffersFixture.hpp.

References Setup().

Referenced by TEST_SUITE().

{
    // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
    m_SingleInputName = inputName;
    m_SingleOutputName = outputName;
    Setup();
}

Member Data Documentation

◆ m_DynamicNetworkIdentifier

armnn::NetworkId m_DynamicNetworkIdentifier

Definition at line 55 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_GraphBinary

std::vector<uint8_t> m_GraphBinary

Definition at line 51 of file ParserFlatbuffersFixture.hpp.

◆ m_JsonString

std::string m_JsonString

Definition at line 52 of file ParserFlatbuffersFixture.hpp.

◆ m_NetworkIdentifier

armnn::NetworkId m_NetworkIdentifier

Definition at line 54 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_Parser

std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl> m_Parser

Definition at line 57 of file ParserFlatbuffersFixture.hpp.

Referenced by CheckTensors(), ParserFlatbuffersFixture(), and RunTest().

◆ m_Runtime

armnn::IRuntimePtr m_Runtime

Definition at line 53 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_SingleInputName

std::string m_SingleInputName

If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest().

Definition at line 61 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_SingleOutputName

std::string m_SingleOutputName

Definition at line 62 of file ParserFlatbuffersFixture.hpp.

Referenced by RunTest().

◆ m_TestDynamic

bool m_TestDynamic

Definition at line 56 of file ParserFlatbuffersFixture.hpp.


The documentation for this struct was generated from the following file:

ParserFlatbuffersFixture.hpp