ArmNN 24.02
TfLiteParser.cpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfLiteParser.hpp"

#include "armnn/LstmParams.hpp"

#include <armnn/BackendOptions.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

// armnnUtils:
#include <armnnUtils/Permute.hpp>

#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the TfLite schema:
#include <schema_generated.h>

#include <flatbuffers/flexbuffers.h>

#include <fmt/format.h>

#include <algorithm>
#include <iostream>
#include <limits>
#include <numeric>
#include <sstream>

#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
          { \
            throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
               << ": " \
               << CHECK_LOCATION().AsString()).str()); \
          }

using namespace armnn;

namespace armnnTfLiteParser
{

ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;

ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}

ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}

BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}

BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}

size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}

std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}

std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
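
// A minimal usage sketch of the facade above, assuming a hypothetical model file
// "model.tflite":
//
//     using namespace armnnTfLiteParser;
//     ITfLiteParserPtr parser = ITfLiteParser::Create(armnn::EmptyOptional());
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//     std::string inputName = parser->GetSubgraphInputTensorNames(0)[0];
//     BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, inputName);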

namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
                   size_t subgraphIndex,
                   const CheckLocation& location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "subgraph:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid subgraph index. "
                        "subgraph:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParserImpl::ModelPtr& model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation& location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "subgraph:{} operator:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        operatorIndex,
                        location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid subgraph index. "
                        "subgraph:{} operator:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        operatorIndex,
                        location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            fmt::format("{} was called with an invalid operator index. "
                        "subgraph:{} operator:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        operatorIndex,
                        location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation& location)
{
    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid tensor index. "
                        "subgraph:{} tensor:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        tensorIndex,
                        location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
                    const CheckLocation& location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
                 size_t bufferIndex,
                 const CheckLocation& location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "buffer:{} at {}",
                        location.m_Function,
                        bufferIndex,
                        location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid buffer index. "
                        "buffer index:{} at {}",
                        location.m_Function,
                        bufferIndex,
                        location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            fmt::format("The buffer #{} is null. {}",
                        bufferIndex,
                        location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo& tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation& location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("BufferPtr is null for buffer:{}. {}",
                        bufferId,
                        location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}
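
// For example, a Float32 tensor of shape {2, 3} has 6 elements and 24 bytes, so its
// backing buffer must hold at least 24 bytes to pass this check.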

tflite::BuiltinOperator GetOpCode(const TfLiteParserImpl::ModelPtr& model, size_t subgraphIndex, size_t operatorIndex)
{
    const auto& operatorPtr = model->subgraphs[subgraphIndex]->operators[operatorIndex];
    auto opcodeIndex = operatorPtr->opcode_index;

// Work around the deprecated_builtin_code field introduced in TF 2.4 in a backwards-compatible manner.
#if defined(ARMNN_POST_TFLITE_2_3)
    auto opcode = std::max(model->operator_codes[opcodeIndex]->builtin_code,
            static_cast<tflite::BuiltinOperator>(model->operator_codes[opcodeIndex]->deprecated_builtin_code));
#else
    auto opcode = model->operator_codes[opcodeIndex]->builtin_code;
#endif
    return opcode;
}
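
// Models produced before TF 2.4 store the operator code in deprecated_builtin_code,
// while newer models use builtin_code; taking std::max of the two fields yields the
// real operator code whichever scheme the file uses.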

std::vector<unsigned int> GetUIntBuffer(armnn::TensorInfo info,
                                        const TfLiteParserImpl::ModelPtr& model,
                                        size_t bufferIndex)
{
    TfLiteParserImpl::BufferRawPtr bufferPtr = TfLiteParserImpl::GetBuffer(model, bufferIndex);
    std::vector<unsigned int> buffer(info.GetNumElements());

    if (info.GetDataType() == DataType::Signed32)
    {
        ::memcpy(buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
    }
    else if (info.GetDataType() == DataType::Signed64)
    {
        std::vector<uint64_t> uint64Buffer(info.GetNumElements());
        ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
        buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
    }
    else
    {
        CheckLocation location = CHECK_LOCATION();
        throw ParseException(
            fmt::format("Unsupported data type for uint buffer {}, only Signed 32 or Signed 64 are supported. {}",
                        GetDataTypeName(info.GetDataType()),
                        location.AsString()));
    }
    return buffer;
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)

std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto& i : in)
    {
        // If the location of the input data is -1 then the input should be ignored.
        if (i == -1)
        {
            continue;
        }
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}
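
// For example, an input vector {2, -1, 3} becomes {2, 3}: a -1 marks an omitted
// optional operand and is skipped rather than converted.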

bool IsOptionalOperandPresent(int input)
{
    return (input >= 0);
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
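
// Worked example (SAME padding): inputSize=224, filterSize=3, stride=2, dilation=1
// gives outputSize=112, dilatedSize=3 and temp=225, so paddingFront=0 and paddingBack=1.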

// Function that calculates explicit padding when the output shape is known.
// At the moment the output is only given as an input parameter in Transpose Convolution,
// not in Convolution and Depthwise Convolution.
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding,
                 uint32_t outputSize)
{
    IgnoreUnused(dilation);
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
        paddingFront = totalPadding / 2;
        paddingBack = totalPadding - paddingFront;
    }
}
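
// Worked example (SAME padding with a known output): inputSize=112, filterSize=3,
// stride=2, outputSize=224 gives totalPadding = (112-1)*2 + 3 - 224 = 1, so
// paddingFront=0 and paddingBack=1.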

armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                               const std::vector<unsigned int>& shape,
                               const bool outputTensor = false)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QAsymmU8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case tflite::TensorType_INT8:
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // Per-tensor
                type = armnn::DataType::QAsymmS8;
            }
            else
            {
                // Per-channel
                type = armnn::DataType::QSymmS8;
            }
            break;
        case tflite::TensorType_INT16:
            type = armnn::DataType::QSymmS16;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;
        case tflite::TensorType_INT64:
            type = armnn::DataType::Signed64;
            break;
        case tflite::TensorType_BOOL:
            type = armnn::DataType::Boolean;
            break;
        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
                            tensorPtr->type,
                            tflite::EnumNameTensorType(tensorPtr->type),
                            tensorPtr->name,
                            location.AsString()));
        }
    }
    TensorShape tensorShape;

    std::vector<unsigned int> safeShape = shape;
    if (shape.size() == 0)
    {
        safeShape.push_back(1);
    }

    if (!outputTensor)
    {
        tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
    }
    else
    {
        size_t shapeSignatureSize = tensorPtr->shape_signature.size();

        // If a shape signature exists we will use that to infer dynamic tensors
        if (shapeSignatureSize != 0)
        {
            // If the shape is incompatible with the shape signature override the shape
            if (shapeSignatureSize != shape.size())
            {
                safeShape = {};

                for (unsigned int i = 0; i < shapeSignatureSize; ++i)
                {
                    unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
                                       static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
                    safeShape.push_back(dim);
                }
            }

            std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
            bool batchOnly = true;
            for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
            {
                dimMask[i] = tensorPtr->shape_signature[i] != -1;

                if (i > 0 && !dimMask[i])
                {
                    batchOnly = false;
                }
            }
            if (batchOnly)
            {
                dimMask[0] = true;
            }
            tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
        }
        // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
        else if (shape.size() == 0)
        {
            tensorShape = TensorShape(1, false);
        }
        else
        {
            tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
        }
    }

    float quantizationScale = 1.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        if (tensorPtr->quantization->scale.size() <= 1)
        {
            CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
            CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

            if (tensorPtr->quantization->scale.size() == 1)
            {
                quantizationScale = tensorPtr->quantization->scale[0];
            }
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // NOTE: we lose precision here when converting from 64 bit to 32
                // but this is what we support at the moment in ArmNN
                quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
            }

            armnn::TensorInfo result(tensorShape,
                                     type,
                                     quantizationScale,
                                     quantizationOffset);
            return result;
        }
        else
        {
            std::vector<float> quantizationScales;
            std::vector<int32_t> quantizationOffsets;

            // Scale
            std::copy(tensorPtr->quantization->scale.begin(),
                      tensorPtr->quantization->scale.end(),
                      std::back_inserter(quantizationScales));

            // QSymmS8 Per-axis
            armnn::TensorInfo result(tensorShape,
                                     type,
                                     quantizationScales,
                                     armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
            return result;
        }
    }
    else
    {
        armnn::TensorInfo result(tensorShape,
                                 type,
                                 quantizationScale,
                                 quantizationOffset);
        return result;
    }
}
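
// For example, a TensorType_INT8 tensor with scale={0.05f} and zero_point={10} takes
// the per-tensor path and yields a QAsymmS8 TensorInfo with scale 0.05 and offset 10,
// while a tensor carrying more than one scale takes the per-axis path above, keyed on
// quantized_dimension.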

armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                               const bool outputTensor = false)
{
    auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions, outputTensor);
}

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
                      TfLiteParserImpl::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    IgnoreUnused(tensorPtr);

    if (!tensorPtr)
    {
        throw armnn::ParseException(fmt::format("Tensor pointer is null {}", CHECK_LOCATION().AsString()));
    }

    if (!bufferPtr)
    {
        throw armnn::ParseException(fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
    }

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id by 8 bit
    // and add the subgraph id, which allows 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
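
// For example, subgraphIndex=1 and tensorIndex=5 produce binding id (5<<8)+1 = 1281.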

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

bool CheckShape(const armnn::TensorShape& actual, const armnn::TensorShape& expected)
{
    std::vector<int32_t> expectedVec;
    for (uint32_t i = 0; i < expected.GetNumDimensions(); i++)
    {
        expectedVec.push_back(expected[i]);
    }
    return CheckShape(actual, expectedVec);
}

void CheckMatchingQuantization(const TensorInfo& first,
                               const TensorInfo& second,
                               const std::string& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(second.GetQuantizationScale()));
    }
}

bool IsDynamic(TfLiteParserImpl::TensorRawPtr tensorPtr)
{
    auto shape = tensorPtr->shape;

    if (shape.empty())
    {
        return true;
    }
    auto shapeSig = tensorPtr->shape_signature;

    if (shapeSig.empty())
    {
        return false;
    }

    for (unsigned int i = 0; i < shapeSig.size() ; ++i)
    {
        if (shapeSig[i] == -1)
        {
            return true;
        }
    }
    return false;
}
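
// For example, a tensor with an empty shape is dynamic, and shape {1, 224, 224, 3}
// with shape_signature {-1, 224, 224, 3} is dynamic in the batch dimension, while a
// shape_signature without any -1 entries marks the tensor as fully static.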

} // <anonymous>

TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
    m_ParserFunctions[tflite::BuiltinOperator_BROADCAST_TO] = &TfLiteParserImpl::ParseBroadcastTo;
    m_ParserFunctions[tflite::BuiltinOperator_CEIL] = &TfLiteParserImpl::ParseCeil;
    m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
    // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
    #if defined(ARMNN_POST_TFLITE_2_4)
    m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
    #endif
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
    m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_GELU] = &TfLiteParserImpl::ParseGelu;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
    m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
            = &TfLiteParserImpl::ParseLocalResponseNormalization;
    m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_POW] = &TfLiteParserImpl::ParsePower;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_REVERSE_V2] = &TfLiteParserImpl::ParseReverseV2;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_DEPTH] = &TfLiteParserImpl::ParseSpaceToDepth;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_SQUARE] = &TfLiteParserImpl::ParseSquare;
    m_ParserFunctions[tflite::BuiltinOperator_SQUARED_DIFFERENCE] = &TfLiteParserImpl::ParseSquaredDifference;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TILE] = &TfLiteParserImpl::ParseTile;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
            = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
}

armnn::TensorInfo TfLiteParserImpl::InputTensorInfo(size_t subgraphIndex,
                                                    size_t operatorIndex,
                                                    int input)
{
    const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[input]);
    auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);

    if (search != m_TensorInfos.end())
    {
        return m_TensorInfos[inputId];
    }
    else
    {
        auto tensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
        m_TensorInfos.insert({ inputId, tensorInfo });
        return tensorInfo;
    }
}

armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromInputs(size_t subgraphIndex,
                                                               size_t operatorIndex,
                                                               armnn::IConnectableLayer* layer,
                                                               int output,
                                                               std::vector<int> inputs)
{
    const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);

    auto outputSearch = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(outputId);

    if (outputSearch != m_TensorInfos.end())
    {
        return m_TensorInfos[outputId];
    }

    const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
    TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);

    if (IsDynamic(outputTensorPtr))
    {
        if (inputs.empty())
        {
            for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
            {
                inputs.emplace_back(i);
            }
        }
        auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
        std::vector<armnn::TensorShape> inputShapes;

        for (unsigned int i = 0; i < inputs.size(); ++i)
        {
            uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[inputs[i]]);
            auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);

            if (search != m_TensorInfos.end())
            {
                auto& inputTensorInfo = m_TensorInfos[inputId];
                inputShapes.push_back(inputTensorInfo.GetShape());
            }
            else
            {
                auto inputTensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
                m_TensorInfos.insert({ inputId, inputTensorInfo });
                inputShapes.push_back(inputTensorInfo.GetShape());
            }
        }
        const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
        tensor.SetShape(outputShape);
    }
    m_TensorInfos.insert({ outputId, tensor });
    return tensor;
}

armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromShapes(size_t subgraphIndex,
                                                               size_t operatorIndex,
                                                               armnn::IConnectableLayer* layer,
                                                               int output,
                                                               std::vector<armnn::TensorShape> inputShapes)
{
    const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
    const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
    TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);

    if (IsDynamic(outputTensorPtr))
    {
        const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
        tensor.SetShape(outputShape);
    }
    m_TensorInfos.insert({ outputId, tensor });
    return tensor;
}

void TfLiteParserImpl::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
    m_OverriddenOutputShapes.clear();
    m_ConstantsToDequantize.clear();
    m_ConstantsToBeCreated.clear();
    m_TensorInfos.clear();
}

INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
{
    ResetParser();
    m_Model = std::move(model);

    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{
    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    if (m_Options)
    {
        if (m_Options.value().m_InferAndValidate)
        {
            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                      {
                                                          { "InferAndValidate", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
        if (m_Options.value().m_AllowExpandedDims)
        {
            BackendOptions allowExpandedDimsOption("AllowExpandedDims",
                                                   {
                                                       { "AllowExpandedDims", true }
                                                   });

            networkOptions.push_back(allowExpandedDimsOption);
        }
    }
    m_Network = INetwork::Create(networkOptions);

    if (m_Model.get() == nullptr)
    {
        throw ParseException(fmt::format("Tflite Model pointer is null {}", CHECK_LOCATION().AsString()));
    }

    // Identify which subgraph we are going to parse. We only support one subgraph but there may be validation
    // subgraphs still stored in the model. We'll ignore these. In the tflite code base they are identified by
    // their name beginning with "VALIDATION:".
    size_t subgraphIndex = 0;
    uint8_t usableSubgraphs = 0;
    for (size_t i = 0; i < m_Model->subgraphs.size(); i++)
    {
        if (m_Model->subgraphs[i]->name.rfind("VALIDATION:", 0) != 0)
        {
            usableSubgraphs++;
            subgraphIndex = i;
        }
    }

    if (usableSubgraphs > 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 non validation subgraph. This model has: {} {}",
                        usableSubgraphs, CHECK_LOCATION().AsString()));
    }

    size_t operatorIndex = 0;
    try
    {
        const SubgraphPtr& subgraph = m_Model->subgraphs[subgraphIndex];
        SetupInputLayerTensorInfos(subgraphIndex);
        SetupConstantLayerTensorInfos(subgraphIndex);

        m_SubgraphConnections.emplace_back(subgraph->tensors.size());
        for (const OperatorPtr& op : subgraph->operators)
        {
            const auto& opCodePtr = m_Model->operator_codes[op->opcode_index];

// Work around the deprecated_builtin_code field introduced in TF 2.4 in a backwards-compatible manner.
#if defined(ARMNN_POST_TFLITE_2_3)
            auto builtinCode = std::max(opCodePtr->builtin_code,
                                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
            auto builtinCode = opCodePtr->builtin_code;
#endif

            if (builtinCode > tflite::BuiltinOperator_MAX)
            {
                throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                 "subgraph:{} operator idx:{}. {}",
                                                 builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                 operatorIndex, CHECK_LOCATION().AsString()));
            }

            // lookup and call the parser function
            auto& parserFunction = m_ParserFunctions[builtinCode];
            (this->*parserFunction)(subgraphIndex, operatorIndex);
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);
    }
    catch (const ParseException& e)
    {
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }
    return std::move(m_Network);
}

bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                                       armnn::DataType inputDataType,
                                                       armnn::DataType tensorDataType)
{
    return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
            (tensorDataType == DataType::QAsymmU8 ||
             tensorDataType == DataType::QAsymmS8 ||
             tensorDataType == DataType::QSymmS8 ||
             tensorDataType == DataType::Signed32 ||
             tensorDataType == DataType::Signed64));
}
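
// In other words, a constant is queued for conversion only when the layer's
// non-constant input is Float32 but the constant itself is stored as a quantized or
// integer type, e.g. Float32 activations paired with QAsymmS8 weights.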
1109 
1110 void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
1111  size_t tensorIndex,
1112  armnn::IOutputSlot* slot)
1113 {
1114  CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
1115 
1116  TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
1117 
1118  if (slot->GetOwningIConnectableLayer().GetType() != LayerType::Constant)
1119  {
1120 
1121  // assuming there is only one producer for that tensor
1122  if (tensorSlots.outputSlot != nullptr)
1123  {
1124  throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
1125  "subgraph:{} tensor:{} {}",
1126  subgraphIndex,
1127  tensorIndex,
1128  CHECK_LOCATION().AsString()));
1129  }
1130  }
1131  tensorSlots.outputSlot = slot;
1132 }

void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
                                                size_t tensorIndex,
                                                armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);

    TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // NOTE: By default we presume the custom operator is not supported
    auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;

    // Identify custom code defined for custom operator
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto& customCode  = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;

    // Find parser function that corresponds to custom code (if any)
    auto iterator = m_CustomParserFunctions.find(customCode);
    if (iterator != m_CustomParserFunctions.end())
    {
        customParserFunction = iterator->second;
    }

    // Run parser function
    (this->*customParserFunction)(subgraphIndex, operatorIndex);
}

void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;

// Work around the deprecated_builtin_code field introduced in TF 2.4 in a backwards-compatible manner.
#if defined(ARMNN_POST_TFLITE_2_3)
    auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
            static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
#else
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
#endif

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            fmt::format("Operator not supported. "
                        "subgraph:{} operator:{} "
                        "opcode_index:{} opcode:{} / {} {}",
                        subgraphIndex,
                        operatorIndex,
                        opcodeIndex,
                        opcode,
                        tflite::EnumNameBuiltinOperator(opcode),
                        CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}

void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = (inputs.size() == 3);
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    std::vector<unsigned int> tensorIndexesToRegister = { inputTensorIndexes[0], inputTensorIndexes[1] };

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
    armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());

    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
    {
        m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
    }

    if (desc.m_BiasEnabled)
    {
        armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);

        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
        {
            m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
        }
    }

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, { outputTensorIndexes[0] });
}

// Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
#if defined(ARMNN_POST_TFLITE_2_4)
void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv3DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution3dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout = armnn::DataLayout::NDHWC;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
    desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NDHWC
    unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
                desc.m_DilationZ, desc.m_PadFront, desc.m_PadBack, options->padding);
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the input connection slots for the layer, connections are made after all layers have been created
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
#endif

void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // ArmNN uses the same filter tensor layout as TfLite [1, H, W, O], so no permutation is needed.
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, layerName.c_str());

    if (desc.m_BiasEnabled)
    {
        TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1505 
1506 void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
1507 {
1508  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1509 
1510  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1511  CHECK_VALID_SIZE(inputs.size(), 1);
1512 
1513  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1514  CHECK_VALID_SIZE(outputs.size(), 1);
1515 
1516  auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
1517 
1518  IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
1519 
1520  if (!layer)
1521  {
1522  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1523  operatorIndex, CHECK_LOCATION().AsString()));
1524  }
1525 
1526  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1527  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1528 
1529  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1530  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1531 
1532  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1533  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1534 }
1535 
1536 void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1537 {
1538  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1539 
1540  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1541  CHECK_VALID_SIZE(inputs.size(), 2);
1542 
1543  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1544  CHECK_VALID_SIZE(outputs.size(), 1);
1545 
1546  auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1547 
1548  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1549  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1550  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1551 
1552  armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1553 
1554  BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1555  if (axisBufferPtr == nullptr)
1556  {
1557  throw ParseException(fmt::format("{}: Operation has invalid inputs. Failed to read axis.",
1558  CHECK_LOCATION().AsString()));
1559  }
1560 
1561  std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
1562  ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
1563  int32_t axis = axisData[0];
1564 
1565  auto inputRank = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1566  auto outputRank = inputRank + 1;
1567  if ((axis < -outputRank) || (axis >= outputRank))
1568  {
1569  throw ParseException(fmt::format("{}: Axis {} is not within [-{}, {}) range.",
1570  CHECK_LOCATION().AsString(), axis, outputRank, outputRank));
1571  }
1572 
1573  axis = axis < 0 ? (axis + outputRank) : axis;
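  // Editor's note - worked example: for an input of shape [2, 3] (inputRank 2, outputRank 3),
  // axis -1 is remapped to -1 + 3 = 2, so the loop below builds the output shape [2, 3, 1].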
1574 
1575  std::vector<unsigned int> shape(static_cast<unsigned int>(outputRank));
1576  unsigned int inputShapeIndex = 0;
1577  for (unsigned int i = 0; i < static_cast<unsigned int>(outputRank); ++i)
1578  {
1579  if (i == static_cast<unsigned int>(axis))
1580  {
1581  shape[i] = 1;
1582  }
1583  else
1584  {
1585  shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1586  ++inputShapeIndex;
1587  }
1588  }
1589 
1590  ReshapeDescriptor reshapeDesc;
1591  reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(outputRank), shape.data());
1592  outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
1593 
1594  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1595 
1596  if (!layer)
1597  {
1598  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1599  operatorIndex, CHECK_LOCATION().AsString()));
1600  }
      layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1601 
1602  auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
1603  m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
1604 
1605  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1606  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1607 
1608  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1609  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1610 }
1611 
1612 void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
1613 {
1614  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1615 
1616  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1617  CHECK_VALID_SIZE(inputs.size(), 1, 2);
1618 
1619  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1620  CHECK_VALID_SIZE(outputs.size(), 1);
1621 
1622  auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
1623  TransposeDescriptor desc;
1624 
1625  if (inputs.size() == 2)
1626  {
1627  armnn::TensorInfo permuteTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1628  BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1629  auto numPermVecElements = permuteTensorInfo.GetNumElements();
1630  std::vector<unsigned int> permuteShape(numPermVecElements);
1631  ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
1632  PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
1633 
1634  desc = TransposeDescriptor(permutationVector);
1635  }
1636  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1637 
1638  IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
1639 
1640  if (!layer)
1641  {
1642  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1643  operatorIndex, CHECK_LOCATION().AsString()));
1644  }
1645 
1646  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1647  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1648  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1649 
1650  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1651  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1652 
1653  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1654  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1655 }
1656 
1657 void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
1658 {
1659  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1660 
1661  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1662  const auto* options = operatorPtr->builtin_options.AsTransposeConvOptions();
1663 
1664  TransposeConvolution2dDescriptor desc;
1665  desc.m_BiasEnabled = false;
1666  desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1667  desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1668  desc.m_DataLayout = armnn::DataLayout::NHWC;
1669 
1670  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1671  if (inputs.size() == 4)
1672  {
1673  desc.m_BiasEnabled = true;
1674  }
1675  else
1676  {
1677  CHECK_VALID_SIZE(inputs.size(), 3);
1678  }
1679 
1680  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1681  CHECK_VALID_SIZE(outputs.size(), 1);
1682 
1683 
1684  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
1685  armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1686 
1687  // TfLite uses NHWC tensors
1688  const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1689  const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1690 
1691  const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1692  const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1693 
1694  // This block determines the output shape of the transpose convolution. If the output shape tensor
1695  // pointer is not null and the tensor is a constant, we can access the data at load time and set the
1696  // output shape of the layer. If it is not constant, we have no access to the shape data, so we have
1697  // to infer the output shape and skip this code block.
1698  if (inputs[0] && IsConstTensor(inputs[0]))
1699  {
1700  armnn::TensorInfo tensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1701  std::vector<int> output_shape(tensorInfo.GetNumElements());
1702 
1703  if (tensorInfo.GetDataType() == DataType::Signed32)
1704  {
1705  ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
1706  }
1707  if (tensorInfo.GetDataType() == DataType::QAsymmU8)
1708  {
1709  for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
1710  {
1711  output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
1712  }
1713  }
1714  // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
1715  for (int dimension : output_shape)
1716  {
1717  desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
1718  }
1719  desc.m_OutputShapeEnabled = true;
1720 
1721  // TfLite uses NHWC tensors
1722  const unsigned int outputHeight = desc.m_OutputShape[1];
1723  const unsigned int outputWidth = desc.m_OutputShape[2];
1724 
1725  CalcPadding(inputHeight,
1726  filterHeight,
1727  desc.m_StrideY,
1728  1, // DilationY
1729  desc.m_PadTop,
1730  desc.m_PadBottom,
1731  options->padding,
1732  outputHeight);
1733 
1734  CalcPadding(inputWidth,
1735  filterWidth,
1736  desc.m_StrideX,
1737  1, // DilationX
1738  desc.m_PadLeft,
1739  desc.m_PadRight,
1740  options->padding,
1741  outputWidth);
1742  }
1743  else
1744  {
1745  CalcPadding(inputHeight,
1746  filterHeight,
1747  desc.m_StrideY,
1748  1, // DilationY
1749  desc.m_PadTop,
1750  desc.m_PadBottom,
1751  options->padding);
1752 
1753  CalcPadding(inputWidth,
1754  filterWidth,
1755  desc.m_StrideX,
1756  1, // DilationX
1757  desc.m_PadLeft,
1758  desc.m_PadRight,
1759  options->padding);
1760  }
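  // Editor's note: e.g. a constant output-shape tensor holding {1, 8, 8, 16} takes the first
  // branch, which reads outputHeight = 8 and outputWidth = 8 and passes them to the CalcPadding
  // overload that solves for the padding of a known output size; a non-constant shape tensor
  // takes the second branch and lets the output shape be inferred instead.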
1761 
1762  auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());
1763 
1764  armnn::IConnectableLayer* layer = nullptr;
1765  auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);
1766 
1767  if (desc.m_BiasEnabled)
1768  {
1769  auto biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
1770  auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo, inputTensorInfo.GetDataType());
1771  layer = m_Network->AddTransposeConvolution2dLayer(desc,
1772  filterTensorAndData.first,
1773  biasConstTensor.first,
1774  layerName.c_str());
1775  }
1776  else
1777  {
1778  layer = m_Network->AddTransposeConvolution2dLayer(desc,
1779  filterTensorAndData.first,
1780  EmptyOptional(),
1781  layerName.c_str());
1782  }
1783 
1784  if (!layer)
1785  {
1786  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1787  operatorIndex, CHECK_LOCATION().AsString()));
1788  }
1789 
1790  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
1791  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1792 
1793  // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1794  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1795  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
1796 
1797  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1798  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1799 }
1800 
1801 void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
1802 {
1803  ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1804 }
1805 
1806 void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex)
1807 {
1808  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1809 
1810  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1811  CHECK_VALID_SIZE(inputs.size(), 2);
1812 
1813  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1814  CHECK_VALID_SIZE(outputs.size(), 1);
1815 
1816  auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex);
1817 
1818  TensorInfo inputXTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1819  TensorInfo inputYTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1820 
1821  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1822  const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions();
1823 
1824  // Adjoint in TensorFlow Lite performs a transpose operation, so adj_x/adj_y map to the transpose parameters below
1825  BatchMatMulDescriptor descriptor(options->adj_x,
1826  options->adj_y,
1827  false,
1828  false);
1829  // Arbitrary DataLayout
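  // Editor's note: e.g. an X input of shape [B, 4, 2] with adj_x == true is treated as
  // [B, 2, 4], i.e. the last two dimensions are transposed before the multiplication.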
1830 
1831  IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
1832 
1833  if (!layer)
1834  {
1835  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1836  operatorIndex, CHECK_LOCATION().AsString()));
1837  }
1838 
1839  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1840  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1841 
1842  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1843  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1844 
1845  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1846  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1847 }
1848 
1849 void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1850 {
1851  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1852 
1853  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1854  CHECK_VALID_SIZE(inputs.size(), 3);
1855 
1856  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1857  CHECK_VALID_SIZE(outputs.size(), 1);
1858 
1859  armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1860  BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1861 
1862  armnn::TensorInfo cropsTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
1863  BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1864 
1865  std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1866  ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1867 
1868  std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1869  ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1870 
1871  size_t step = 2;
1872  std::vector<std::pair<unsigned int, unsigned int>> crops;
1873  for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1874  {
1875  crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1876  }
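  // Editor's note - worked example: a flattened crops tensor {0, 0, 1, 1} (shape [2, 2])
  // becomes the pairs {{0, 0}, {1, 1}}: no cropping of the first spatial dimension and
  // one element cropped from each end of the second.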
1877 
1878  BatchToSpaceNdDescriptor desc;
1879  desc.m_BlockShape = blockShape;
1880  desc.m_Crops = crops;
1881  desc.m_DataLayout = armnn::DataLayout::NHWC;
1882 
1883  auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
1884 
1885  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1886 
1887  IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1888 
1889  if (!layer)
1890  {
1891  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1892  operatorIndex, CHECK_LOCATION().AsString()));
1893  }
1894 
1895  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1896  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1897  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1898 
1899  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1900  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1901 
1902  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1903  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1904 }
1905 
1906 void TfLiteParserImpl::ParseBroadcastTo(size_t subgraphIndex, size_t operatorIndex)
1907 {
1908  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1909 
1910  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1911  CHECK_VALID_SIZE(inputs.size(), 2);
1912 
1913  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1914  CHECK_VALID_SIZE(outputs.size(), 1);
1915 
1916  TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1917  TensorInfo shapeTensorInfo = ToTensorInfo(inputs[1]);
1918  TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1919 
1920  auto layerName = fmt::format("Broadcast_to:{}:{}", subgraphIndex, operatorIndex);
1921 
1922  BroadcastToDescriptor descriptor;
1923 
1924  auto shapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1925  if (shapeBufferPtr != nullptr)
1926  {
1927  std::vector<unsigned int> targetShape;
1928  unsigned int numElement = shapeTensorInfo.GetNumElements();
1929  auto shapeData = reinterpret_cast<const int32_t*>(shapeBufferPtr->data.data());
1930  if (shapeData)
1931  {
1932  for (unsigned int i = 0; i < numElement; ++i)
1933  {
1934  targetShape.push_back(armnn::numeric_cast<unsigned int>(shapeData[i]));
1935  }
1936  descriptor.m_BroadcastToShape = TensorShape(numElement, targetShape.data());
1937  }
1938  // Fall back to the output shape if the shape data is missing from the buffer.
1939  else
1940  {
1941  if (outputTensorInfo.GetShape().GetNumElements() <= 1)
1942  {
1943  ARMNN_THROW_PARSE_EXCEPTION("For Broadcast_to layer, "
1944  "data and output shape are not found in the buffer.");
1945  }
1946  descriptor.m_BroadcastToShape = outputTensorInfo.GetShape();
1947  }
1948  }
1949  else
1950  {
1951  ARMNN_THROW_PARSE_EXCEPTION("For Broadcast_to layer, Shape data was not found in the buffer.");
1952  }
1953 
1954  IConnectableLayer* layer = m_Network->AddBroadcastToLayer(descriptor, layerName.c_str());
1955  ARMNN_ASSERT(layer != nullptr);
1956 
1957  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1958 
1959  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1960  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1961 
1962  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1963  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1964 }
1965 
1966 void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1967 {
1968  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1969 
1970  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1971  CHECK_VALID_SIZE(inputs.size(), 1);
1972 
1973  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1974  CHECK_VALID_SIZE(outputs.size(), 1);
1975 
1976  L2NormalizationDescriptor desc;
1977  desc.m_DataLayout = armnn::DataLayout::NHWC;
1978  auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
1979  IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1980 
1981  if (!layer)
1982  {
1983  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1984  operatorIndex, CHECK_LOCATION().AsString()));
1985  }
1986 
1987  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1988  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1989 
1990  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1991  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1992 
1993  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1994  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1995 }
1996 
1997 void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1998 {
1999  ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
2000 }
2001 
2002 void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
2003 {
2004  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2005 
2006  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2007  CHECK_VALID_SIZE(inputs.size(), 2);
2008 
2009  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2010  CHECK_VALID_SIZE(outputs.size(), 1);
2011 
2012  auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
2013 
2014  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2015  TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2016  CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
2017 
2018  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str());
2019 
2020  if (!layer)
2021  {
2022  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2023  operatorIndex, CHECK_LOCATION().AsString()));
2024  }
2025 
2026  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2027  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2028  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2029 
2030  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2031  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2032 
2033  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2034  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2035 }
2036 
2037 void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
2038 {
2039  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2040 
2041  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2042  CHECK_VALID_SIZE(inputs.size(), 2);
2043 
2044  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2045  CHECK_VALID_SIZE(outputs.size(), 1);
2046 
2047  auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
2048 
2049  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2050  TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2051  CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
2052 
2053  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str());
2054 
2055  if (!layer)
2056  {
2057  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2058  operatorIndex, CHECK_LOCATION().AsString()));
2059  }
2060 
2061  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2062  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2063  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2064 
2065  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2066  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2067 
2068  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2069  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2070 }
2071 
2072 void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
2073  size_t operatorIndex,
2074  PoolingAlgorithm algorithm)
2075 {
2076  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2077 
2078  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2079  const auto* options = operatorPtr->builtin_options.AsPool2DOptions();
2080 
2081  CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2082 
2083  std::string layerName;
2084 
2085  switch (algorithm)
2086  {
2087  case PoolingAlgorithm::Average:
2088  layerName =
2089  fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
2090  break;
2091  case PoolingAlgorithm::Max:
2092  layerName =
2093  fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
2094  break;
2095  default:
2096  throw ParseException(fmt::format("Unsupported Pooling Algorithm {}", CHECK_LOCATION().AsString()));
2097  }
2098 
2099  Pooling2dDescriptor desc;
2100 
2101  desc.m_PoolType = algorithm;
2102  desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
2103  desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
2104  desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
2105  desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
2106  desc.m_PaddingMethod = PaddingMethod::Exclude;
2107  desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
2108  desc.m_DataLayout = armnn::DataLayout::NHWC;
2109 
2110  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2111  CHECK_VALID_SIZE(inputs.size(), 1);
2112  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2113 
2114  // assuming input is NHWC
2115  unsigned int inputHeight = inputTensorInfo.GetShape()[1];
2116  unsigned int inputWidth = inputTensorInfo.GetShape()[2];
2117 
2118  CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
2119  desc.m_PadTop, desc.m_PadBottom, options->padding);
2120  CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
2121  desc.m_PadLeft, desc.m_PadRight, options->padding);
2122 
2123  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2124  CHECK_VALID_SIZE(outputs.size(), 1);
2125 
2126  IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
2127 
2128  if (!layer)
2129  {
2130  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2131  operatorIndex, CHECK_LOCATION().AsString()));
2132  }
2133 
2134  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2135  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2136  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2137 
2138  // register the input connection slots for the layer, connections are made after all layers have been created
2139  // only the tensors for the inputs are relevant, exclude the const tensors
2140  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2141  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2142 
2143  layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2144  // register the output connection slots for the layer, connections are made after all layers have been created
2145  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2146  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2147 }
2148 
2149 void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
2150 {
2151  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2152 
2153  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2154  CHECK_VALID_SIZE(inputs.size(), 3);
2155  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2156  CHECK_VALID_SIZE(outputs.size(), 1);
2157 
2158  SliceDescriptor desc;
2159 
2160  // set begin tensor info for slice descriptor
2161  armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2162  BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2163 
2164  std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
2165  ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
2166 
2167  // set size tensor info for slice descriptor
2168  armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
2169  BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2170 
2171  std::vector<int> signedSize(sizeTensorInfo.GetNumElements(), 1);
2172 
2173  // if size buffer data is not specified, all contents of size vector remain as values of 1
2174  if (sizeBufferPtr->data.data())
2175  {
2176  ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2177  }
2178 
2179  std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
2180  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2181 
2182  for (unsigned int i = 0; i < signedSize.size(); ++i)
2183  {
2184  int signedValue = signedSize[i];
2185 
2186  if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
2187  {
2188  throw ParseException(fmt::format("Invalid value for size {}. Size must be in the range "
2189  "[-1, inputDimSize - begin], i.e. [-1, {}] inclusive {}",
2190  signedValue,
2191  inputTensorInfo.GetShape()[i] - begin[i],
2192  CHECK_LOCATION().AsString()));
2193  }
2194 
2195  if (signedValue == -1)
2196  {
2197  size[i] = inputTensorInfo.GetShape()[i] - begin[i];
2198  }
2199  else
2200  {
2201  size[i] = static_cast<unsigned int>(signedValue);
2202  }
2203  }
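  // Editor's note - worked example: for an input of shape [4, 8] with begin {1, 0} and
  // size {-1, 4}, size[0] == -1 expands to 4 - 1 = 3, so the slice copies a [3, 4] block
  // starting at offset (1, 0).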
2204 
2205  desc = SliceDescriptor(begin, size);
2206 
2207  auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
2208 
2209  IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
2210 
2211  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2212  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2213  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2214 
2215  // register the input connection slots for the layer, connections are made after all layers have been created
2216  // only the tensors for the inputs are relevant, exclude the const tensors
2217  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2218  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2219 
2220  // register the output connection slots for the layer, connections are made after all layers have been created
2221  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2222  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2223 }
2224 
2225 void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
2226 {
2227  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2228  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2229  const auto* options = operatorPtr->builtin_options.AsSoftmaxOptions();
2230 
2231  SoftmaxDescriptor desc;
2232  desc.m_Beta = options->beta;
2233 
2234  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2235  CHECK_VALID_SIZE(inputs.size(), 1);
2236  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2237  CHECK_VALID_SIZE(outputs.size(), 1);
2238 
2239  auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
2240  IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
2241 
2242  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2243  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2244 
2245  // register the input connection slots for the layer, connections are made after all layers have been created
2246  // only the tensors for the inputs are relevant, exclude the const tensors
2247  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2248  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2249 
2250  // register the output connection slots for the layer, connections are made after all layers have been created
2251  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2252  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2253 }
2254 
2255 void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex)
2256 {
2257  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2258 
2259  LogSoftmaxDescriptor desc;
2260 
2261  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2262  CHECK_VALID_SIZE(inputs.size(), 1);
2263  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2264  CHECK_VALID_SIZE(outputs.size(), 1);
2265 
2266  auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex);
2267  IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str());
2268 
2269  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2270  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2271 
2272  // register the input connection slots for the layer, connections are made after all layers have been created
2273  // only the tensors for the inputs are relevant, exclude the const tensors
2274  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2275  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2276 
2277  // register the output connection slots for the layer, connections are made after all layers have been created
2278  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2279  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2280 }
2281 
2282 void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
2283 {
2284  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2285 
2286  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2287  CHECK_VALID_SIZE(inputs.size(), 3);
2288 
2289  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2290  CHECK_VALID_SIZE(outputs.size(), 1);
2291 
2292  armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2293  BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2294 
2295  armnn::TensorInfo padListTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
2296  BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2297 
2298  std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
2299  ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
2300 
2301  std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
2302  ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
2303 
2304  size_t step = 2;
2305  std::vector<std::pair<unsigned int, unsigned int>> padList;
2306  for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
2307  {
2308  padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
2309  }
2310 
2311  SpaceToBatchNdDescriptor desc;
2312  desc.m_BlockShape = blockShape;
2313  desc.m_PadList = padList;
2314  desc.m_DataLayout = armnn::DataLayout::NHWC;
2315 
2316  auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
2317 
2318  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2319 
2320  IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
2321 
2322  if (!layer)
2323  {
2324  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2325  operatorIndex, CHECK_LOCATION().AsString()));
2326  }
2327 
2328  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2329  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2330  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2331 
2332  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2333  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2334 
2335  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2336  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2337 }
2338 
2339 void TfLiteParserImpl::ParseSpaceToDepth(size_t subgraphIndex, size_t operatorIndex)
2340 {
2341  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2342 
2343  TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2344  CHECK_VALID_SIZE(inputs.size(), 1);
2345  TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2346  CHECK_VALID_SIZE(outputs.size(), 1);
2347 
2348  armnn::SpaceToDepthDescriptor descriptor;
2349 
2350  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2351  const auto* options = operatorPtr->builtin_options.AsSpaceToDepthOptions();
2352  auto blockSize = options->block_size;
2353  if (blockSize < 2)
2354  {
2355  throw ParseException(
2356  fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
2357  blockSize,
2358  CHECK_LOCATION().AsString()));
2359  }
2360  descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
2361 
2362  auto layerName = fmt::format("SpaceToDepth:{}:{}", subgraphIndex, operatorIndex);
2363  IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
2364 
2365  if (!layer)
2366  {
2367  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2368  operatorIndex, CHECK_LOCATION().AsString()));
2369  }
2370 
2371  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2372  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2373 
2374  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2375  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2376 
2377  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2378  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2379 }
2380 
2381 armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
2382  const armnn::TensorInfo& inputTensorInfo)
2383 {
2384  CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
2385  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2386 
2387  if (inputTensorInfo.GetNumDimensions() > 4)
2388  {
2389  std::stringstream ss;
2390  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2391  << " shape:" << inputTensorInfo.GetShape() << " "
2392  << CHECK_LOCATION().AsString();
2393  throw ParseException(ss.str());
2394  }
2395 
2396  if (squeezeDims.empty())
2397  {
2398  squeezeDims.assign(dimensionSequence,
2399  dimensionSequence+inputTensorInfo.GetNumDimensions());
2400  }
2401 
2402  std::vector<uint32_t> outputDims;
2403  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2404  {
2405  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2406  auto currentDimension = inputTensorInfo.GetShape()[i];
2407  if (skipSqueeze || currentDimension != 1)
2408  {
2409  outputDims.push_back(currentDimension);
2410  }
2411  }
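  // Editor's note - worked example: an input of shape [1, 2, 1, 3] with squeezeDims {0, 2}
  // drops both singleton dimensions and keeps the others, giving outputDims {2, 3}; with
  // empty squeezeDims (defaulted above) every dimension of size 1 would be removed.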
2412 
2413  if (outputDims.size() > 4)
2414  {
2415  std::stringstream ss;
2416  ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
2417  << " input shape:" << inputTensorInfo.GetShape() << " "
2418  << CHECK_LOCATION().AsString();
2419  throw ParseException(ss.str());
2420  }
2421 
2422  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2423  outputDims.data());
2424 
2425  // we need to preserve the tensor type and the quantization data as well
2426  TensorInfo outTensorInfo = inputTensorInfo;
2427  outTensorInfo.SetShape(outShape);
2428 
2429  return outTensorInfo;
2430 }
2431 
2432 void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
2433 {
2434  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2435 
2436  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2437  CHECK_VALID_SIZE(inputs.size(), 1);
2438  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2439  CHECK_VALID_SIZE(outputs.size(), 1);
2440 
2441  auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
2442 
2443  IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
2444 
2445  if (!layer)
2446  {
2447  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2448  operatorIndex, CHECK_LOCATION().AsString()));
2449  }
2450 
2451  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2452  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2453 
2454  // Check if output tensor type is Signed32 or Signed64
2455  if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
2456  outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
2457  {
2458  throw ParseException(
2459  fmt::format(
2460  "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
2461  CHECK_LOCATION().AsString()));
2462  }
2463 
2464  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2465  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2466 
2467  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2468  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2469 }
2470 
2471 void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
2472 {
2473  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2474 
2475  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2476  CHECK_VALID_SIZE(inputs.size(), 1);
2477 
2478  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2479  CHECK_VALID_SIZE(outputs.size(), 1);
2480 
2481  const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2482  const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
2483  auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
2484 
2485  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2486 
2487  std::vector<uint32_t> squeezeDim;
2488  // A single negative dim index is interpreted as a negative index in Python,
2489  // meaning the index will be the shape size plus the negative index value.
2490  if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
2491  {
2492  int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
2493  squeezeDim.push_back(static_cast<uint32_t>(dim));
2494  }
2495  else
2496  {
2497  squeezeDim = AsUnsignedVector(options->squeeze_dims);
2498  }
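  // Editor's note: e.g. a 4-D input with squeeze_dims {-1} resolves to dimension 4 + (-1) = 3.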
2499 
2500  armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
2501 
2502  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2503 
2504  ReshapeDescriptor reshapeDesc;
2505  reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
2506 
2507  auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
2508  m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
2509 
2510  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
2511 
2512  if (!layer)
2513  {
2514  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2515  operatorIndex, CHECK_LOCATION().AsString()));
2516  }
2517 
2518  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2519 
2520  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2521  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2522 
2523  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2524  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2525 }
2526 
2527 void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
2528 {
2529  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2530 
2531  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2532  CHECK_VALID_SIZE(inputs.size(), 4);
2533 
2534  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2535  CHECK_VALID_SIZE(outputs.size(), 1);
2536 
2537  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2538  const auto* options = operatorPtr->builtin_options.AsStridedSliceOptions();
2539 
2540  StridedSliceDescriptor desc;
2541  desc.m_BeginMask = options->begin_mask;
2542  desc.m_EllipsisMask = options->ellipsis_mask;
2543  desc.m_EndMask = options->end_mask;
2544  desc.m_NewAxisMask = options->new_axis_mask;
2545  desc.m_ShrinkAxisMask = options->shrink_axis_mask;
2546  desc.m_DataLayout = armnn::DataLayout::NHWC;
2547 
2548  armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2549  BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2550 
2551  std::vector<int> begin(beginTensorInfo.GetNumElements());
2552  if (beginBufferPtr->data.data() != nullptr)
2553  {
2554  ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
2555  }
2556  else
2557  {
2558  throw ParseException("ParseStridedSlice: Invalid input - the begin vector is null");
2559  }
2560 
2561  armnn::TensorInfo endTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
2562  BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2563 
2564  std::vector<int> end(endTensorInfo.GetNumElements());
2565  if (endBufferPtr->data.data() != nullptr)
2566  {
2567  ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
2568  }
2569  else
2570  {
2571  throw ParseException("ParseStridedSlice: Invalid input - the end vector is null");
2572  }
2573 
2574  armnn::TensorInfo strideTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
2575  BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
2576 
2577  std::vector<int> stride(strideTensorInfo.GetNumElements());
2578 
2579  if (strideBufferPtr->data.data() != nullptr)
2580  {
2581  ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
2582  }
2583  else
2584  {
2585  throw ParseException("ParseStridedSlice: Invalid input - the stride vector is null");
2586  }
2587 
2588  desc.m_Begin = begin;
2589  desc.m_End = end;
2590  desc.m_Stride = stride;
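  // Editor's note - worked example: for an input of shape [4, 4], begin {0, 0}, end {4, 4}
  // and stride {1, 2} select every second column, giving a [4, 2] output; a set bit i in
  // m_BeginMask / m_EndMask tells the backend to ignore begin[i] / end[i] and use the full
  // range of that dimension instead.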
2591 
2592  auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
2593  IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
2594 
2595  if (!layer)
2596  {
2597  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2598  operatorIndex, CHECK_LOCATION().AsString()));
2599  }
2600 
2601  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2602  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2603 
2604  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2605  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2606 
2607  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2608  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2609 }
2610 
2611 void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
2612 {
2613  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2614 
2615  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2616  const auto* options = operatorPtr->builtin_options.AsSubOptions();
2617 
2618  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2619  CHECK_VALID_SIZE(inputs.size(), 2);
2620 
2621  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2622  CHECK_VALID_SIZE(outputs.size(), 1);
2623 
2624  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2625  armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2626 
2627  auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
2628  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str());
2629 
2630  if (!layer)
2631  {
2632  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2633  operatorIndex, CHECK_LOCATION().AsString()));
2634  }
2635 
2636  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2637  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2638 
2639  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2640  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2641  if (options)
2642  {
2643  layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2644  }
2645 
2646  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2647  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2648 }
2649 
2650 void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
2651 {
2652  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2653 
2654  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2655  const auto* options = operatorPtr->builtin_options.AsDivOptions();
2656 
2657  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2658  CHECK_VALID_SIZE(inputs.size(), 2);
2659 
2660  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2661  CHECK_VALID_SIZE(outputs.size(), 1);
2662 
2663  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2664  armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2665 
2666  auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
2667  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
2668 
2669  if (!layer)
2670  {
2671  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2672  operatorIndex, CHECK_LOCATION().AsString()));
2673  }
2674 
2675  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2676  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2677 
2678  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2679  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2680  if (options)
2681  {
2682  layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2683  }
2684 
2685  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2686  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2687 }
2688 
2689 void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
2690 {
2691  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2692 
2693  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2694  CHECK_VALID_SIZE(inputs.size(), 2);
2695 
2696  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2697  CHECK_VALID_SIZE(outputs.size(), 1);
2698 
2699  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2700  armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2701 
2702  auto layerName = fmt::format("FloorDiv:{}:{}", subgraphIndex, operatorIndex);
2703  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
2704 
2705  if (!layer)
2706  {
2707  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2708  operatorIndex, CHECK_LOCATION().AsString()));
2709  }
2710 
2711  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2712  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2713 
2714  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2715  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2716  layer = AddFusedFloorLayer(layer, 0);
2717 
2718  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2719  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2720 }
2721 
2722 void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
2723 {
2724  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2725 
2726  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2727  const auto* options = operatorPtr->builtin_options.AsAddOptions();
2728 
2729  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2730  CHECK_VALID_SIZE(inputs.size(), 2);
2731 
2732  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2733  CHECK_VALID_SIZE(outputs.size(), 1);
2734 
2735  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2736  armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2737 
2738  auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
2739  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str());
2740 
2741  if (!layer)
2742  {
2743  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2744  operatorIndex, CHECK_LOCATION().AsString()));
2745  }
2746 
2747  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2748  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2749 
2750  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2751  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2752  if (options)
2753  {
2754  layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2755  }
2756 
2757  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2758  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2759 }
2760 
2761 void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
2762 {
2763  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2764 
2765  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2766  const auto* options = operatorPtr->builtin_options.AsMulOptions();
2767 
2768  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2769  CHECK_VALID_SIZE(inputs.size(), 2);
2770 
2771  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2772  CHECK_VALID_SIZE(outputs.size(), 1);
2773 
2774  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2775  armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2776 
2777  auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
2778  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
2779 
2780  if (!layer)
2781  {
2782  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2783  operatorIndex, CHECK_LOCATION().AsString()));
2784  }
2785 
2786  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2787  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2788 
2789  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2790  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2791  if (options)
2792  {
2793  layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2794  }
2795 
2796  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2797  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2798 }
2799 
2800 void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
2801 {
2802  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2803 
2804  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2805 
2806  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2807  CHECK_VALID_SIZE(outputs.size(), 1);
2808 
2809  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2810  TensorInfo dimTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2811 
2812  armnn::MeanDescriptor desc;
2813  BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2814  // Get const axis value from model and set it to descriptor.
2815  if (axisBufferPtr != nullptr)
2816  {
2817  std::vector<int32_t> axisData(dimTensorInfo.GetNumElements());
2818  ::memcpy(axisData.data(), axisBufferPtr->data.data(), dimTensorInfo.GetNumBytes());
2819 
2820  // Convert the axis to unsigned int and remove duplicates.
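    // A negative axis counts from the back: (i + rank) % rank maps e.g. axis -1 on a
    // rank-4 tensor to 3, and the std::set removes any duplicate axes.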
2821  auto rank = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2822  std::set<unsigned int> uniqueAxis;
2823  std::transform(axisData.begin(),
2824  axisData.end(),
2825  std::inserter(uniqueAxis, uniqueAxis.begin()),
2826  [rank](int i)->unsigned int{
2827  return static_cast<uint32_t>(((i + rank) % rank)); });
2828  desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2829  }
2830  else
2831  {
2832  for (uint32_t i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
2833  {
2834  desc.m_Axis.push_back(i);
2835  }
2836  }
2837 
2838  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
2839 
2840  desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
2841 
2842  auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
2843  IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
2844 
2845  if (!layer)
2846  {
2847  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2848  operatorIndex, CHECK_LOCATION().AsString()));
2849  }
2850 
2851  outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2852  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2853 
2854  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2855  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2856 
2857  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2858  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2859 }
2860 
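/// Handles both PAD and PADV2. PAD takes two inputs (tensor, paddings); PADV2 takes a third
/// scalar input holding the pad value, which is dequantized here when it is a quantized tensor.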
2861 void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
2862 {
2863  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2864 
2865  TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2866 
2867  TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2868  CHECK_VALID_SIZE(outputs.size(), 1);
2869 
2870  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2871  armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2872 
2873  std::vector<unsigned int> padBuffer = GetUIntBuffer(padTensorInfo, m_Model, inputs[1]->buffer);
2874 
2875  size_t step = 2;
2876  armnn::PadDescriptor desc;
2877  auto opcode = GetOpCode(m_Model, subgraphIndex, operatorIndex);
2878 
2879  if (opcode == tflite::BuiltinOperator_PAD)
2880  {
2881  CHECK_VALID_SIZE(inputs.size(), 2);
2882 
2883  if (inputTensorInfo.IsQuantized())
2884  {
2885  desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
2886  }
2887  }
2888  else if (opcode == tflite::BuiltinOperator_PADV2)
2889  {
2890  CHECK_VALID_SIZE(inputs.size(), 3);
2891 
2892  armnn::TensorInfo padValueTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
2893 
2894  if (padValueTensorInfo.GetNumElements() != 1)
2895  {
2896  ARMNN_THROW_PARSE_EXCEPTION("Multiple padding values are not supported in PADV2");
2897  }
2898  BufferRawPtr padValueBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2899 
2900  // Get the pad value from the input tensor
2901  if (padValueBufferPtr->data.size() > 0)
2902  {
2903  switch (padValueTensorInfo.GetDataType())
2904  {
2905  case armnn::DataType::Float32:
2906  {
2907  std::vector<float> padValueBuffer(padValueTensorInfo.GetNumElements());
2908  ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
2909  desc.m_PadValue = padValueBuffer[0];
2910  break;
2911  }
2912  case armnn::DataType::QAsymmU8:
2913  {
2914  std::vector<uint8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
2915  ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
2916  desc.m_PadValue = armnn::Dequantize<uint8_t>(padValueBuffer[0],
2917  padValueTensorInfo.GetQuantizationScale(),
2918  padValueTensorInfo.GetQuantizationOffset());
2919  break;
2920  }
2921  case armnn::DataType::QAsymmS8:
2922  case armnn::DataType::QSymmS8:
2923  {
2924  std::vector<int8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
2925  ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
2926  desc.m_PadValue = armnn::Dequantize<int8_t>(padValueBuffer[0],
2927  padValueTensorInfo.GetQuantizationScale(),
2928  padValueTensorInfo.GetQuantizationOffset());
2929  break;
2930  }
2931  default: ARMNN_THROW_PARSE_EXCEPTION("Unsupported DataType");
2932  }
2933  }
2934  else if (inputTensorInfo.IsQuantized())
2935  {
2936  desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
2937  }
2938  }
2939 
2940  for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
2941  {
2942  desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
2943  }
2944 
2945  auto layerName = (opcode == tflite::BuiltinOperator_PAD) ? fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex)
2946  : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);
2947 
2948  IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
2949 
2950  if (!layer)
2951  {
2952  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2953  operatorIndex, CHECK_LOCATION().AsString()));
2954  }
2955 
2956  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2957  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2958 
2959  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2960  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2961 
2962  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2963  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2964 }
2965 
2966 void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
2967 {
2968  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2969 
2970  TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2971  CHECK_VALID_SIZE(inputs.size(), 2);
2972 
2973  TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2974  CHECK_VALID_SIZE(outputs.size(), 1);
2975 
2976  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2977 
2978  armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
2979  BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2980 
2981  std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
2982  ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
2983 
2984  size_t step = 2;
2985  armnn::PadDescriptor desc;
2986  for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
2987  {
2988  desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
2989  }
2990 
2991  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2992  const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();
2993 
2994  if (options->mode == tflite::MirrorPadMode_REFLECT)
2995  {
2996  desc.m_PaddingMode = PaddingMode::Reflect;
2997  }
2998  else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
2999  {
3000  desc.m_PaddingMode = PaddingMode::Symmetric;
3001  }
3002  else
3003  {
3004  ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
3005  }
3006 
3007  // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
3008  // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
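    // For example, reflect-padding a dimension of size 5 allows at most 4 on either side,
    // while symmetric padding allows up to 5.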
3009  auto inputShape = inputTensorInfo.GetShape();
3010  auto padList = desc.m_PadList;
3011 
3012  const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
3013  for (unsigned int i = 0; i < padList.size(); ++i)
3014  {
3015  if (padList.at(i).first > (inputShape[i] - isReflect) ||
3016  padList.at(i).second > (inputShape[i] - isReflect))
3017  {
3018  ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less than the dimension size (Reflect) "
3019  "or less than or equal to it (Symmetric).");
3020  }
3021  }
3022 
3023  auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);
3024 
3025  IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
3026 
3027  if (!layer)
3028  {
3029  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3030  operatorIndex, CHECK_LOCATION().AsString()));
3031  }
3032 
3033  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3034  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3035 
3036  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3037  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3038 
3039  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3040  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3041 }
3042 
3043 void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
3044 {
3045  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3046 
3047  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3048  CHECK_VALID_SIZE(inputs.size(), 2);
3049 
3050  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3051  CHECK_VALID_SIZE(outputs.size(), 1);
3052 
3053  auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);
3054 
3055  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3056  armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
3057 
3058  IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
3059 
3060  if (!layer)
3061  {
3062  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3063  operatorIndex, CHECK_LOCATION().AsString()));
3064  }
3065 
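    // If alpha is a constant tensor, materialise it as a ConstantLayer wired into input slot 1;
    // otherwise both inputs are registered as ordinary network connections.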
3066  if (IsConstTensor(inputs[1]))
3067  {
3068  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3069  armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
3070  RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);
3071 
3072  auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo,
3073  inputTensorInfo.GetDataType());
3074  std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
3075  IConnectableLayer* constLayer =
3076  m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());
3077 
3078  if (!constLayer)
3079  {
3080  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3081  operatorIndex, CHECK_LOCATION().AsString()));
3082  }
3083 
3084  constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
3085  constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
3086  RegisterOutputSlots(subgraphIndex,
3087  VIRTUAL_OPERATOR_ID,
3088  constLayer,
3089  { inputTensorIndexes[1] });
3090  }
3091  else
3092  {
3093  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3094  RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
3095  }
3096 
3097  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
3098  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3099 
3100  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3101  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3102 }
3103 
3104 void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
3105 {
3106  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3107 
3108  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3109  CHECK_VALID_SIZE(inputs.size(), 1);
3110 
3111  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3112  CHECK_VALID_SIZE(outputs.size(), 1);
3113 
3114  auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
3115 
3116  IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
3117 
3118  if (!layer)
3119  {
3120  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3121  operatorIndex, CHECK_LOCATION().AsString()));
3122  }
3123 
3124  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3125  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3126 
3127  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3128  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3129 
3130  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3131  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3132 }
3133 
3134 void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
3135 {
3136  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
3137 }
3138 
3139 void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
3140 {
3141  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
3142 }
3143 
3144 void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
3145 {
3146  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
3147 }
3148 
3149 void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
3150 {
3151  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
3152 }
3153 
3154 void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
3155 {
3156  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::TanH);
3157 }
3158 
3159 void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
3160 {
3161  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
3162 }
3163 
3164 void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
3165 {
3166  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
3167 }
3168 
3169 void TfLiteParserImpl::ParseGelu(size_t subgraphIndex, size_t operatorIndex)
3170 {
3171  ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Gelu);
3172 }
3173 
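/// Shared handler for the activation operators above; the switch below fills in the
/// per-function descriptor parameters (for example the upper bound of 6.0f for RELU6).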
3174 void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
3175 {
3176  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3177  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3178  IgnoreUnused(operatorPtr);
3179 
3180  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3181  CHECK_VALID_SIZE(inputs.size(), 1);
3182 
3183  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3184  CHECK_VALID_SIZE(outputs.size(), 1);
3185 
3186  auto layerName = fmt::format("Activation:");
3187  ActivationDescriptor activationDesc;
3188  activationDesc.m_Function = activationType;
3189 
3190  switch (activationType)
3191  {
3192  case ActivationFunction::ReLu:
3193  {
3194  layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
3195  break;
3196  }
3197  case ActivationFunction::BoundedReLu:
3198  {
3199  layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
3200  activationDesc.m_A = 6.0f;
3201  activationDesc.m_B = 0.0f;
3202  break;
3203  }
3204  case ActivationFunction::Sigmoid:
3205  {
3206  layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
3207  break;
3208  }
3209  case ActivationFunction::TanH:
3210  {
3211  layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
3212  activationDesc.m_A = 1.0f;
3213  activationDesc.m_B = 1.0f;
3214  break;
3215  }
3216  case ActivationFunction::LeakyReLu:
3217  {
3218  layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
3219  const auto* options = operatorPtr->builtin_options.AsLeakyReluOptions();
3220  activationDesc.m_A = options->alpha;
3221  break;
3222  }
3223  case ActivationFunction::Elu:
3224  {
3225  layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
3226  activationDesc.m_A = 1.0f;
3227  break;
3228  }
3229  case ActivationFunction::HardSwish:
3230  {
3231  layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
3232  break;
3233  }
3234  case ActivationFunction::Gelu:
3235  {
3236  layerName += fmt::format("GELU:{}:{}", subgraphIndex, operatorIndex);
3237  break;
3238  }
3239  default:
3240  {
3241  throw ParseException(
3242  fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
3243  static_cast<int>(activationType), CHECK_LOCATION().AsString()));
3244  }
3245  }
3246 
3247  IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3248 
3249  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3250  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3251 
3252  // register the input connection slots for the layer, connections are made after all layers have been created
3253  // only the tensors for the inputs are relevant, exclude the const tensors
3254  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3255  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3256 
3257  // register the output connection slots for the layer, connections are made after all layers have been created
3258  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3259  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3260 }
3261 
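/// Computes the output TensorInfo for a reshape. At most one target dimension may be -1;
/// that dimension is stretched so the element count matches the input, e.g. reshaping a
/// 2x3x4 tensor (24 elements) to {-1, 4} yields {6, 4}.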
3262 armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
3263  const std::vector<int32_t>& targetDimsIn)
3264 {
3265  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
3266  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
3267 
3268  if (stretchDim != targetDimsIn.end())
3269  {
3270  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
3271  {
3272  throw ParseException(
3273  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
3274  }
3275 
3276  auto targetNumElements =
3277  armnn::numeric_cast<unsigned int>(
3278  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
3279 
3280  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
3281 
3282  if (targetNumElements == 0)
3283  {
3284  if (inputTensorInfo.GetNumElements() == 0)
3285  {
3286  outputDims[stretchIndex] = 0;
3287  }
3288  else
3289  {
3290  throw ParseException(
3291  fmt::format("Input to reshape is a tensor with elements, but the requested shape has 0. {}",
3292  CHECK_LOCATION().AsString()));
3293  }
3294  }
3295  else
3296  {
3297  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
3298  }
3299  }
3300 
3301  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
3302 
3303  TensorInfo reshapeInfo = inputTensorInfo;
3304  reshapeInfo.SetShape(outputShape);
3305 
3306  return reshapeInfo;
3307 }
3308 
3309 void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
3310 {
3311  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3312 
3313  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3314 
3315  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3316  CHECK_VALID_SIZE(outputs.size(), 1);
3317 
3318  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3319  const auto* options = operatorPtr->builtin_options.AsReshapeOptions();
3320  auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);
3321 
3322  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3323  armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
3324  CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");
3325 
3326  // Extracting new shape for the output
3327  // There are two ways it can be passed
3328  // * First is to define the target shape in the operator built-in options
3329  // * Second is to pass it as a second input tensor
3330  std::vector<int32_t> targetShape;
3331  bool targetShapeFound = false;
3332  // Check if built-in options were given
3333  if (options != nullptr)
3334  {
3335  // make sure the parameter is given
3336  if (options->new_shape.empty() == false)
3337  {
3338  targetShape = options->new_shape;
3339  targetShapeFound = true;
3340  }
3341  }
3342 
3343  // If there is no built-in option given or if the built-in new_shape parameter was empty
3344  if (!targetShapeFound)
3345  {
3346  // Check for a second input tensor
3347  if (inputs.size() > 1 && inputs[1] != nullptr)
3348  {
3349  if (inputs[1]->is_variable)
3350  {
3351  ARMNN_THROW_PARSE_EXCEPTION("Target shapes defined in non-const input tensors are not supported");
3352  }
3353 
3354  if (inputs[1]->shape.size() != 1)
3355  {
3356  ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
3357  }
3358 
3359  if (inputs[1]->type != tflite::TensorType_INT32)
3360  {
3361  ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
3362  }
3363 
3364  // Extract target shape from input
3365  auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3366  auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
3367  if (values)
3368  {
3369  for (int i = 0; i < inputs[1]->shape[0]; ++i)
3370  {
3371  targetShape.push_back(values[i]);
3372  }
3373  }
3374  else
3375  {
3376  try
3377  {
3378  // We attempt to infer the target shape at runtime.
3379  TensorShape reshapeShapes = ToTensorInfo(inputs[1]).GetShape();
3380 
3381  if (reshapeShapes[0] == actualOutputTensorInfo.GetNumDimensions())
3382  {
3383  for (unsigned int i = 0; i < actualOutputTensorInfo.GetShape().GetNumDimensions(); ++i)
3384  {
3385  targetShape.push_back(actualOutputTensorInfo.GetShape()[i]);
3386  }
3387  }
3388  // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
3389  else if (reshapeShapes[0] > 2)
3390  {
3391  throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}. "
3392  "When inferring during runtime, the parser only supports "
3393  "shape (batch, -1) or (-1) for target shape input.",
3394  reshapeShapes[0],
3395  layerName,
3396  CHECK_LOCATION().AsString()));
3397  }
3398  else
3399  {
3400  const int32_t numInputElements = inputTensorInfo.GetNumElements();
3401  const int32_t inputTensorShape = inputTensorInfo.GetShape()[0];
3402  if (reshapeShapes[0] == 1)
3403  {
3404  targetShape = {numInputElements};
3405  }
3406  else if (reshapeShapes[0] == 2)
3407  {
3408  targetShape = {inputTensorShape, numInputElements / inputTensorShape};
3409  }
3410  }
3411  }
3412  catch (const std::exception& exc)
3413  {
3414  ARMNN_THROW_PARSE_EXCEPTION("Failed to infer the target shape at runtime for the "
3415  "Reshape operation. The Reshape operator's target shape input "
3416  "buffer data is null. " << exc.what());
3417  }
3418  }
3419  }
3420  else
3421  {
3422  ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
3423  "At least one method is required.");
3424  }
3425  }
3426 
3427  armnn::TensorInfo reshapeOutputTensorInfo =
3428  TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);
3429 
3430  // Check for valid input size and that reshape parameters equal output shape
3431  // The output shape can be provided to us in 2 ways:
3432  // 1. through the normal 'shape' parameter given by outputs[idx]->shape
3433  // 2. through the additional 'shape_signature' parameter given by outputs[idx]->shape_signature.
3434  // This parameter can sometimes contain a -1 value not visible in the 'shape' parameter.
3435  const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
3436  if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
3437  {
3438  // Attempt to extract output shape from secondary 'shape_signature'
3439  // parameter and try to CheckShape() with this param.
3440  std::vector<int32_t> secondaryOutputTargetShape = outputs[0]->shape_signature;
3441 
3442  // if outputs[0]->shape_signature contain a -1 value, we need to compute its actual value
3443  // from reshape input in order to correctly verify reshape parameters equal output shape
3444  armnn::TensorInfo secondaryReshapeOutputTensorInfo =
3445  TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, secondaryOutputTargetShape);
3446 
3447  if (!CheckShape(reshapeOutputTensorShape, secondaryReshapeOutputTensorInfo.GetShape()))
3448  {
3449  std::stringstream ss;
3450  ss << "New shape defined in reshape parameters "
3451  << reshapeOutputTensorShape
3452  << " does not equal output shape "
3453  << actualOutputTensorInfo.GetShape()
3454  << ": "
3455  << CHECK_LOCATION().AsString();
3456  throw ParseException(ss.str());
3457  }
3458  }
3459  auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
3460 
3461  ReshapeDescriptor reshapeDesc;
3462  reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
3463  m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;
3464 
3465  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
3466 
3467  if (!layer)
3468  {
3469  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3470  operatorIndex, CHECK_LOCATION().AsString()));
3471  }
3472 
3473  layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
3474 
3475  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3476  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3477 
3478  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3479  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3480 }
3481 
3482 void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
3483 {
3484  ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
3485 }
3486 
3487 void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
3488 {
3489  ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
3490 }
3491 
3492 void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
3493 {
3494  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3495 
3496  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3497  CHECK_VALID_SIZE(inputs.size(), 2);
3498 
3499  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3500  CHECK_VALID_SIZE(outputs.size(), 1);
3501 
3502  armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
3503 
3504  // Data for the parsed tensor args (size) must be stored locally.
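    // The size input is a 2-element int32 tensor holding the new [height, width].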
3505  std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
3506 
3507  BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3508  ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
3509 
3510  ResizeDescriptor desc;
3511  desc.m_Method = resizeMethod;
3512  desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
3513  desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
3514  desc.m_DataLayout = armnn::DataLayout::NHWC;
3515 
3516  auto layerName = fmt::format("Resize:");
3517 
3518  switch (resizeMethod)
3519  {
3520  case ResizeMethod::Bilinear:
3521  {
3522  layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
3523 
3524  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3525  const auto* options = operatorPtr->builtin_options.AsResizeBilinearOptions();
3526 
3527  desc.m_AlignCorners = options->align_corners;
3528  break;
3529  }
3530  case ResizeMethod::NearestNeighbor:
3531  {
3532  layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
3533  break;
3534  }
3535  default:
3536  {
3537  throw ParseException(
3538  fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
3539  static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
3540  }
3541  }
3542 
3543  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3544 
3545  IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
3546 
3547  if (!layer)
3548  {
3549  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3550  operatorIndex, CHECK_LOCATION().AsString()));
3551  }
3552 
3553  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3554  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
3555  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3556 
3557  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3558  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3559 
3560  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3561  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3562 }
3563 
3564 void TfLiteParserImpl::ParseReverseV2(size_t subgraphIndex, size_t operatorIndex)
3565 {
3566  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3567 
3568  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3569  CHECK_VALID_SIZE(inputs.size(), 2);
3570 
3571  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3572  CHECK_VALID_SIZE(outputs.size(), 1);
3573 
3574  auto layerName = fmt::format("ReverseV2:{}:{}", subgraphIndex, operatorIndex);
3575 
3576  TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3577  TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
3578  TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3579 
3580  IConnectableLayer* layer = m_Network->AddReverseV2Layer(layerName.c_str());
3581  ARMNN_ASSERT(layer != nullptr);
3582 
3583  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3584 
3585  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3586  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3587 
3588  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3589  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3590 }
3591 
3592 void TfLiteParserImpl::ParseTile(size_t subgraphIndex, size_t operatorIndex)
3593 {
3594  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3595 
3596  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3597  CHECK_VALID_SIZE(inputs.size(), 2);
3598 
3599  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3600  CHECK_VALID_SIZE(outputs.size(), 1);
3601 
3602  TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3603  TensorInfo multiplesTensorInfo = ToTensorInfo(inputs[1]);
3604  TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3605 
3606  auto layerName = fmt::format("Tile:{}:{}", subgraphIndex, operatorIndex);
3607 
3608  TileDescriptor descriptor;
3609 
3610  BufferRawPtr multiplesBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3611  if (multiplesBufferPtr != nullptr)
3612  {
3613  std::vector<int32_t> multiplesData(multiplesTensorInfo.GetNumElements());
3614  ::memcpy(multiplesData.data(), multiplesBufferPtr->data.data(), multiplesTensorInfo.GetNumBytes());
3615  descriptor.m_Multiples.assign(multiplesData.begin(), multiplesData.end());
3616  }
3617  else
3618  {
3619  ARMNN_THROW_PARSE_EXCEPTION("For Tile layer, Multiples data was not found in the buffer.");
3620  }
3621 
3622  IConnectableLayer* layer = m_Network->AddTileLayer(descriptor, layerName.c_str());
3623  ARMNN_ASSERT(layer != nullptr);
3624 
3625  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3626 
3627  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3628  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3629 
3630  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3631  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3632 }
3633 
3634 void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
3635 {
3636  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3637 
3638  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3639  const auto* options = operatorPtr->builtin_options.AsConcatenationOptions();
3640 
3641  CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
3642 
3643  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3644  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3645  auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
3646 
3647  CHECK_VALID_SIZE(outputs.size(), 1);
3648 
3649  unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
3650  uint32_t inputRank = InputTensorInfo(subgraphIndex, operatorIndex, 0).GetNumDimensions();
3651 
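    // Normalise a possibly negative concatenation axis into the range [0, inputRank).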
3652  const unsigned int concatDimInput = static_cast<unsigned int>(
3653  (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
3654 
3655  OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
3656  concatDescriptor.SetConcatAxis(concatDimInput);
3657  unsigned int mergeDimOrigin = 0;
3658 
3659  for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
3660  {
3661  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, viewIndex);
3662 
3663  // This sets up the concatDescriptor view origin
3664  armnnUtils::ProcessConcatInputTensorInfo(
3665  inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
3666  }
3667 
3668  auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
3669 
3670  IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
3671 
3672  if (!layer)
3673  {
3674  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3675  operatorIndex, CHECK_LOCATION().AsString()));
3676  }
3677 
3678  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
3679  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3680 
3681  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3682  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3683 
3684  // add fused activation layer
3685  layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
3686 
3687  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3688  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3689 }
3690 
3691 void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
3692 {
3693  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3694 
3695  const auto& operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3696  const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
3697 
3698  CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
3699 
3700  armnn::FullyConnectedDescriptor desc;
3701  desc.m_BiasEnabled = false;
3702  desc.m_TransposeWeightMatrix = true;
3703 
3704  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3705  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3706  CHECK_VALID_SIZE(outputs.size(), 1);
3707 
3708  armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
3709 
3710  // Fully Connected Layer accepts two dimensional weights input
3711  int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
3712  if (weightsDimension != 2)
3713  {
3714  throw ParseException(
3715  fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
3716  "Node {}",
3717  weightsDimension,
3718  CHECK_LOCATION().AsString()));
3719  }
3720 
3721  armnn::IConnectableLayer* layer = nullptr;
3722  auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
3723 
3724  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3725  // Add the first input tensor to the registration list
3726  std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
3727  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3728 
3729  desc.m_ConstantWeights = IsConstTensor(inputs[1]);
3730 
3731  // Add the weights input to the registration list; if it is constant, the constant layer will be added by SetupConstantLayers.
3732  tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
3733 
3734  if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
3735  {
3736  m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
3737  }
3738 
3739  if (inputs.size() == 3)
3740  {
3741  desc.m_BiasEnabled = true;
3742  armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
3743 
3744  // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
3745  tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
3746 
3747  if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
3748  {
3749  m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
3750  }
3751  }
3752 
3753  // Filters and biases are always passed to fully connected as inputs
3754  layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());
3755 
3756  if (!layer)
3757  {
3758  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3759  operatorIndex, CHECK_LOCATION().AsString()));
3760  }
3761 
3762  unsigned int startingSlotIndex = 0;
3763  if (inputTensorInfo.GetNumDimensions() > 2)
3764  {
3765  // Add reshape to flatten to 2D [batch_size, input_size],
3766  // where "input_size" corresponds to the number of inputs to the layer,
3767  // matching the second dimension of weights,
3768  // and "batch_size" is calculated by dividing the number of elements by "input_size".
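    // For example, a [2, 3, 4] input with weights of shape [outputs, 12] is flattened to [2, 12].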
3769  std::vector<unsigned int> reshapedDimensions(2);
3770  reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
3771  reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
3772 
3773  if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
3774  {
3775  throw ParseException(
3776  fmt::format("Failed to deduce input tensor shape from filter size {} {}",
3777  reshapedDimensions[1],
3778  CHECK_LOCATION().AsString()));
3779  }
3780 
3781  armnn::TensorInfo reshapedTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3782  reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
3783  inputTensorInfo = reshapedTensorInfo;
3784 
3785  std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
3786  armnn::ReshapeDescriptor reshapeDescriptor;
3787  reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
3788  armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor,
3789  reshapeLayerName.c_str());
3790 
3791  reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
3792  reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
3793 
3794  RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
3795  // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
3796  tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
3797  startingSlotIndex = 1;
3798  }
3799 
3800  RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);
3801 
3802  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromShapes(subgraphIndex, operatorIndex, layer, 0,
3803  { inputTensorInfo.GetShape(),
3804  filterTensorInfo.GetShape() });
3805 
3806  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3807 
3808  if (outputTensorInfo.GetNumDimensions() > 2)
3809  {
3810  // Calculate reshape to flatten to 2D [batch_size, input_size]
3811  std::vector<unsigned int> reshapedDimensions(2);
3812  reshapedDimensions[1] = filterTensorInfo.GetShape()[0];
3813  reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
3814  armnn::TensorInfo reshapedOutputTensorInfo = outputTensorInfo;
3815  if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
3816  {
3817  throw ParseException(
3818  fmt::format("Failed to deduce output tensor shape from filter size {} {}",
3819  reshapedDimensions[1],
3820  CHECK_LOCATION().AsString()));
3821  }
3822  reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
3823  layer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
3824 
3825  std::string reshapeLayerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
3826  layer = AddReshapeLayer(layer, 0, reshapeLayerName, outputTensorInfo);
3827  }
3828 
3829  // we need to add the activation layer and fortunately we don't need to care about the data layout
3830  armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
3831  options->fused_activation_function);
3832 
3833  // register the output connection slots for the layer, connections are made after all layers have been created
3834  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3835  RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
3836 
3837  m_TensorInfos[outputTensorIndexes[0]] = layer->GetOutputSlot(0).GetTensorInfo();
3838 }
3839 
3840 void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
3841 {
3842  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3843 
3844  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3845 
3846  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3847  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3848  CHECK_VALID_SIZE(outputs.size(), 4);
3849 
3850  // Obtain custom options from flexbuffers
3851  auto custom_options = operatorPtr->custom_options;
3852  const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
3853 
3854  // Obtain descriptor information from tf lite
3855  DetectionPostProcessDescriptor desc;
3856  desc.m_MaxDetections = m["max_detections"].AsUInt32();
3857  desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
3858  desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
3859  desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
3860  desc.m_NumClasses = m["num_classes"].AsUInt32();
3861  desc.m_ScaleH = m["h_scale"].AsFloat();
3862  desc.m_ScaleW = m["w_scale"].AsFloat();
3863  desc.m_ScaleX = m["x_scale"].AsFloat();
3864  desc.m_ScaleY = m["y_scale"].AsFloat();
3865 
3866  if (!(m["use_regular_nms"].IsNull()))
3867  {
3868  desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
3869  }
3870  if (!(m["detections_per_class"].IsNull()))
3871  {
3872  desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
3873  }
3874 
3875  if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
3876  {
3877  throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
3878  "must be positive and less than or equal to 1.");
3879  }
3880 
3881  armnn::TensorInfo anchorTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
3882  auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);
3883 
3884  auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
3885  IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
3886  layerName.c_str());
3887 
3888  if (!layer)
3889  {
3890  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3891  operatorIndex, CHECK_LOCATION().AsString()));
3892  }
3893 
3894  // The model does not specify the output shapes.
3895  // The output shapes are calculated from max_detections and max_classes_per_detection.
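    // For example, max_detections=10 with max_classes_per_detection=1 gives boxes of shape
    // [1, 10, 4], scores and classes of shape [1, 10], and a detection count of shape [1].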
3896  unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
3897  m_OverriddenOutputShapes.push_back({ 1, numDetectedBox, 4 });
3898  m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3899  m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3900  m_OverriddenOutputShapes.push_back({ 1 });
3901 
3902  for (unsigned int i = 0 ; i < outputs.size() ; ++i)
3903  {
3904  armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverriddenOutputShapes[i]);
3905  layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
3906  }
3907 
3908  // Register the input connection slots for the layer, connections are made after all layers have been created
3909  // only the tensors for the inputs are relevant, exclude the const tensors
3910  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3911  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3912 
3913  // Register the output connection slots for the layer, connections are made after all layers have been created
3914  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3915  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
3916  outputTensorIndexes[1],
3917  outputTensorIndexes[2],
3918  outputTensorIndexes[3]});
3919 }
3920 
3921 /// The TfLite Pack operator is equivalent to the ArmNN Stack operator
3922 void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
3923 {
3924  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3925 
3926  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3927  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3928  CHECK_VALID_SIZE(outputs.size(), 1);
3929 
3930  if (inputs.size() < 1)
3931  {
3932  throw ParseException("Pack must have at least one input.");
3933  }
3934 
3935  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3936  const auto* options = operatorPtr->builtin_options.AsPackOptions();
3937 
3938  StackDescriptor desc;
3939  desc.m_Axis = static_cast<uint32_t>(options->axis);
3940  desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
3941 
3942  // Use the tensor shape of the first input as the "correct" input shape in the descriptor
3943  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3944  desc.m_InputShape = inputTensorInfo.GetShape();
3945 
3946  auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
3947  IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
3948 
3949  if (!layer)
3950  {
3951  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3952  operatorIndex, CHECK_LOCATION().AsString()));
3953  }
3954 
3955  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
3956  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3957 
3958  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3959  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3960 
3961  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3962  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3963 }
3964 
3965 void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex)
3966 {
3967  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3968 
3969  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3970  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3971 
3972  if (inputs.size() < 2)
3973  {
3974  throw ParseException("UnidirectionalSequenceLSTM must have at least 2 inputs.");
3975  }
3976 
3977  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3978  const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
3979  const auto nodeParams = operatorPtr->builtin_options.AsUnidirectionalSequenceLSTMOptions();
3980  CHECK_SUPPORTED_FUSED_ACTIVATION(nodeParams, subgraphIndex, operatorIndex);
3981  auto inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
3982  auto outputTensorInfo = ToTensorInfo(outputs[0]);
3983 
3984  // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
3985  // Please refer to each operand at
3986  // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
3987  armnn::LstmInputParams params;
3988 
3989  if (IsOptionalOperandPresent(operatorPtr->inputs[1]))
3990  {
3991  params.m_InputToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[1]].get(),
3992  inputTensorInfo).first;
3993  }
3994 
3995  params.m_InputToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[2]].get(),
3996  inputTensorInfo).first;
3997  params.m_InputToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[3]].get(),
3998  inputTensorInfo).first;
3999  params.m_InputToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[4]].get(),
4000  inputTensorInfo).first;
4001 
4002  // Recurrent weight tensors of size {n_cell, n_output}
4003  if (IsOptionalOperandPresent(operatorPtr->inputs[5]))
4004  {
4005  params.m_RecurrentToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[5]].get(),
4006  inputTensorInfo).first;
4007  }
4008 
4009  params.m_RecurrentToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[6]].get(),
4010  inputTensorInfo).first;
4011  params.m_RecurrentToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[7]].get(),
4012  inputTensorInfo).first;
4013  params.m_RecurrentToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[8]].get(),
4014  inputTensorInfo).first;
4015 
4016  // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
4017  if (IsOptionalOperandPresent(operatorPtr->inputs[9]))
4018  {
4019  params.m_CellToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[9]].get(),
4020  inputTensorInfo).first;
4021  }
4022 
4023  if (IsOptionalOperandPresent(operatorPtr->inputs[10]))
4024  {
4025  params.m_CellToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[10]].get(),
4026  inputTensorInfo).first;
4027  }
4028 
4029  if (IsOptionalOperandPresent(operatorPtr->inputs[11]))
4030  {
4031  params.m_CellToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[11]].get(),
4032  inputTensorInfo).first;
4033  }
4034 
4035  // Gates bias tensors of size {n_cell}
4036  if (IsOptionalOperandPresent(operatorPtr->inputs[12]))
4037  {
4038  params.m_InputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[12]].get(),
4039  inputTensorInfo).first;
4040  }
4041 
4042  params.m_ForgetGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[13]].get(),
4043  inputTensorInfo).first;
4044  params.m_CellBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[14]].get(),
4045  inputTensorInfo).first;
4046  params.m_OutputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[15]].get(),
4047  inputTensorInfo).first;
4048 
4049  // Projection weight tensor of size {n_output, n_cell}
4050  if (IsOptionalOperandPresent(operatorPtr->inputs[16]))
4051  {
4052  params.m_ProjectionWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[16]].get(),
4053  inputTensorInfo).first;
4054  }
4055  // Projection bias tensor of size {n_output}
4056  if (IsOptionalOperandPresent(operatorPtr->inputs[17]))
4057  {
4058  params.m_ProjectionBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[17]].get(),
4059  inputTensorInfo).first;
4060  }
4061 
4062  // These state tensors are defined as variable tensors, and will be modified by this op.
4063  armnn::TensorInfo outputStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[18]].get());
4064  m_ConstantsToBeCreated.push_back(operatorPtr->inputs[18]);
4065  armnn::TensorInfo cellStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[19]].get());
4066  m_ConstantsToBeCreated.push_back(operatorPtr->inputs[19]);
4067 
4068  // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
4069  if (inputs.size() >= 21 && IsOptionalOperandPresent(operatorPtr->inputs[20]))
4070  {
4071  params.m_InputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[20]].get(),
4072  inputTensorInfo).first;
4073  }
4074 
4075  if (inputs.size() >= 22 && IsOptionalOperandPresent(operatorPtr->inputs[21]))
4076  {
4077  params.m_ForgetLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[21]].get(),
4078  inputTensorInfo).first;
4079  }
4080 
4081  if (inputs.size() >= 23 && IsOptionalOperandPresent(operatorPtr->inputs[22]))
4082  {
4083  params.m_CellLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[22]].get(),
4084  inputTensorInfo).first;
4085  }
4086 
4087  if (inputs.size() >= 24 && IsOptionalOperandPresent(operatorPtr->inputs[23]))
4088  {
4089  params.m_OutputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[23]].get(),
4090  inputTensorInfo).first;
4091  }
4092 
4093  // Set the layer descriptor
4094  armnn::UnidirectionalSequenceLstmDescriptor desc;
4095  desc.m_ActivationFunc = nodeParams->fused_activation_function;
4096  desc.m_ClippingThresCell = nodeParams->cell_clip;
4097  desc.m_ClippingThresProj = nodeParams->proj_clip;
4098  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
4099  || params.m_RecurrentToInputWeights == nullptr
4100  || params.m_InputGateBias == nullptr);
4101  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
4102  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4103  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
4104  || params.m_ForgetLayerNormWeights != nullptr
4105  || params.m_CellLayerNormWeights != nullptr
4106  || params.m_OutputLayerNormWeights != nullptr);
4107  desc.m_TimeMajor = nodeParams->time_major;
4108 
4109  if (operatorPtr->intermediates.size() > 3 && desc.m_LayerNormEnabled)
4110  {
4111  auto inputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[0]].get(),
4112  inputTensorInfo).first;
4113  auto inputIntermediateTensorInfo = inputIntermediate->GetInfo();
4114  desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
4115 
4116  auto forgetIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[1]].get(),
4117  inputTensorInfo).first;
4118  auto forgetIntermediateTensorInfo = forgetIntermediate->GetInfo();
4119  desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
4120 
4121  auto cellIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[2]].get(),
4122  inputTensorInfo).first;
4123  auto cellIntermediateTensorInfo = cellIntermediate->GetInfo();
4124  desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
4125 
4126  auto outputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[3]].get(),
4127  inputTensorInfo).first;
4128  auto outputIntermediateTensorInfo = outputIntermediate->GetInfo();
4129  desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
4130  }
4131  else
4132  {
4133  float defaultIntermediate = std::pow(2.0f, -12.0f);
4134  desc.m_InputIntermediateScale = defaultIntermediate;
4135  desc.m_ForgetIntermediateScale = defaultIntermediate;
4136  desc.m_CellIntermediateScale = defaultIntermediate;
4137  desc.m_OutputIntermediateScale = defaultIntermediate;
4138  }
4139 
4140  if (operatorPtr->intermediates.size() > 4)
4141  {
4142  auto hiddentensor = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[4]].get(),
4143  inputTensorInfo).first;
4144 
4145  desc.m_HiddenStateScale = hiddentensor->GetInfo().GetQuantizationScale();
4146  desc.m_HiddenStateZeroPoint = hiddentensor->GetInfo().GetQuantizationOffset();
4147  }
4148  unsigned int batchSize = desc.m_TimeMajor ? inputTensorInfo.GetShape()[1] : inputTensorInfo.GetShape()[0];
4149  unsigned int outputSize = outputTensorInfo.GetShape()[2];
4150  unsigned int numUnits = cellStateInInfo.GetShape()[1];
4151 
4152  armnn::DataType dataType = inputTensorInfo.GetDataType();
4153  float qScale = inputTensorInfo.GetQuantizationScale();
4154  float qOffset = inputTensorInfo.GetQuantizationOffset();
4155 
4156  armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
4157  if (!desc.m_CifgEnabled)
4158  {
4159  scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
4160  }
4161  armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
4162  cellStateInInfo.GetDataType(),
4163  cellStateInInfo.GetQuantizationScale(),
4164  cellStateInInfo.GetQuantizationOffset());
4165  armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
4166 
4167  armnn::LstmInputParamsInfo paramsInfo;
4168  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4169  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4170  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4171  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4172  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4173  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4174  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4175  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4176  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4177 
4178  if (!desc.m_CifgEnabled)
4179  {
4180  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4181  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4182  if (params.m_CellToInputWeights != nullptr)
4183  {
4184  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4185  }
4186  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4187  }
4188 
4189  if (desc.m_ProjectionEnabled)
4190  {
4191  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4192  if (params.m_ProjectionBias != nullptr)
4193  {
4194  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4195  }
4196  }
4197 
4198  if (desc.m_PeepholeEnabled)
4199  {
4200  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4201  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4202  }
4203 
4204  if (desc.m_LayerNormEnabled)
4205  {
4206  if(!desc.m_CifgEnabled)
4207  {
4208  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4209  }
4210  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4211  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4212  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4213  }
4214 
4215  auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
4216  armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params, layerName.c_str());
4217 
4218  if (!layer)
4219  {
4220  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4221  operatorIndex, CHECK_LOCATION().AsString()));
4222  }
4223 
4224  // Register the input connection slots for the layer; connections are made once all layers have been created.
4225  // Only the tensors for the inputs are relevant; exclude the const tensors.
4226  auto inputTensorIndexes = AsUnsignedVector({operatorPtr->inputs[0],
4227  operatorPtr->inputs[18],
4228  operatorPtr->inputs[19]});
4229  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0],
4230  inputTensorIndexes[1],
4231  inputTensorIndexes[2]});
4232 
4233  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4234 
4235  layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
4236  layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
4237  layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
4238 
4239  unsigned int tensorIndex = outputTensorIndexes[0];
4240  armnn::IOutputSlot* slot = &(layer->GetOutputSlot(2));
4241  RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
4242 }
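
// A minimal sketch (not part of the parser, hypothetical sizes) of how the
// optional-input convention above drives the descriptor: when the three CIFG
// tensors (inputs 1, 5 and 12) are all absent, m_CifgEnabled is true and the
// scratch buffer covers three gates; otherwise it covers four. With
// numUnits = 8 and batchSize = 2:
//
//   // CIFG enabled:  scratch buffer shape is {2, 8 * 3} = {2, 24}
//   // CIFG disabled: scratch buffer shape is {2, 8 * 4} = {2, 32}
//   armnn::TensorInfo scratch({2, cifgEnabled ? 8u * 3u : 8u * 4u}, dataType, qScale, qOffset);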
4243 
4244 void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
4245 {
4246  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4247 
4248  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4249  const auto* options = operatorPtr->builtin_options.AsUnpackOptions();
4250 
4251  // The unpackAxis indicates the axis along which to unpack.
4252  const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
4253 
4254  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4255  CHECK_VALID_SIZE(inputs.size(), 1);
4256 
4257  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4258 
4259  if (unpackAxis >= inputTensorInfo.GetNumDimensions())
4260  {
4261  throw ParseException(
4262  fmt::format("The unpack axis: {} cannot be greater than or equal to "
4263  "the number of input dimensions {} {}",
4264  unpackAxis,
4265  inputTensorInfo.GetNumDimensions(),
4266  CHECK_LOCATION().AsString()));
4267  }
4268 
4269  unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
4270  // If num is not defined, automatically infer it from the size of the dimension being unpacked.
4271  if(unpackNum == 0)
4272  {
4273  unpackNum = inputTensorInfo.GetShape()[unpackAxis];
4274  }
4275 
4276  // If unpack number cannot be inferred and is still zero, throw ParseException.
4277  if(unpackNum == 0)
4278  {
4279  throw ParseException("Number to unpack must be greater than zero.");
4280  }
4281 
4282  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4283  CHECK_VALID_SIZE(outputs.size(), unpackNum);
4284 
4285  auto inputDimSize = inputTensorInfo.GetNumDimensions();
4286  std::vector<unsigned int> unpackDimSizes(inputDimSize);
4287 
4288  // Add current input shape to unpackDimSizes
4289  for (unsigned int i = 0; i < inputDimSize; ++i)
4290  {
4291  unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
4292  }
4293 
4294  if (unpackDimSizes[unpackAxis] != unpackNum)
4295  {
4296  throw ParseException("Number to unpack must be the same as length of the dimension to "
4297  "unpack along.");
4298  }
4299 
4300  unpackDimSizes[unpackAxis] /= unpackNum;
4301 
4302  SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
4303  for (unsigned int j = 0; j < unpackNum; ++j)
4304  {
4305  // Set the size of the views.
4306  for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
4307  {
4308  splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
4309  }
4310  splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
4311  }
4312  splitDesc.SetAxis(unpackAxis);
4313  auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
4314  IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
4315 
4316  if (!layer)
4317  {
4318  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4319  operatorIndex, CHECK_LOCATION().AsString()));
4320  }
4321 
4322  TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
4323  unpackDimSizes.data());
4324 
4325  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4326  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4327 
4328  std::vector<unsigned int> reshapeDims;
4329  for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
4330  {
4331  if (axis != unpackAxis)
4332  {
4333  reshapeDims.push_back(splitOutShape[axis]);
4334  }
4335  }
4336 
4337  TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
4338 
4339  // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
4340  for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4341  {
4342  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
4343  std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
4344  armnn::ReshapeDescriptor desc;
4345  desc.m_TargetShape = reshapeOutputShape;
4346  armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
4347 
4348  layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
4349  outputTensorInfo.GetDataType(),
4350  outputTensorInfo.GetQuantizationScale(),
4351  outputTensorInfo.GetQuantizationOffset()));
4352  layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
4353 
4354  reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4355 
4356  uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
4357  armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
4358  RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
4359  }
4360 }
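
// A minimal sketch (not part of the parser, hypothetical shapes) of the
// Splitter + Reshape lowering above, for a {4, 3} input unpacked along axis 0.
// The Splitter produces four {1, 3} views and each Reshape then drops the
// unpacked dimension:
//
//   armnn::SplitterDescriptor splitDesc(4, 2);     // 4 views over a rank-2 input
//   for (unsigned int j = 0; j < 4; ++j)
//   {
//       splitDesc.SetViewSize(j, 0, 1);            // 4 / 4 on the unpack axis
//       splitDesc.SetViewSize(j, 1, 3);            // full size elsewhere
//       splitDesc.SetViewOriginCoord(j, 0, j);     // origins 0, 1, 2, 3
//   }
//   splitDesc.SetAxis(0);
//   // Each Splitter output then feeds a Reshape with target shape {3}.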
4361 
4362 void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
4363 {
4364  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4365 
4366  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4367  const auto* options = operatorPtr->builtin_options.AsSplitOptions();
4368 
4369  const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
4370 
4371  // If number of splits cannot be inferred and is zero, throw ParseException.
4372  if(numSplits == 0)
4373  {
4374  throw ParseException("Number of splits must be greater than zero.");
4375  }
4376 
4377  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4378  CHECK_VALID_SIZE(inputs.size(), 2);
4379  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4380  CHECK_VALID_SIZE(outputs.size(), numSplits);
4381 
4382  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4383  armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4384 
4385  if (axisTensorInfo.GetNumElements() != 1)
4386  {
4387  throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
4388  CHECK_LOCATION().AsString()));
4389  }
4390 
4391  BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
4392  if (axisBufferPtr == nullptr)
4393  {
4394  throw ParseException(
4395  fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4396  CHECK_LOCATION().AsString()));
4397  }
4398 
4399  std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
4400  ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
4401  int32_t axis = axisData[0];
4402 
4403  auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4404  if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4405  {
4406  // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4407  // E.g. Rank 4 tensor can have axis in range [-4, 3)
4408  // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4409  throw ParseException(
4410  fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4411  axis,
4412  CHECK_LOCATION().AsString()));
4413  }
4414 
4415  const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
4416 
4417  auto inputDimSize = inputTensorInfo.GetNumDimensions();
4418  if (inputDimSize > MaxNumOfTensorDimensions)
4419  {
4420  throw ParseException(
4421  fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
4422  inputTensorInfo.GetNumDimensions(),
4423  MaxNumOfTensorDimensions,
4424  CHECK_LOCATION().AsString()));
4425  }
4426 
4427  std::vector<unsigned int> splitterDimSizes(inputDimSize);
4428 
4429  // Add current input shape to splitterDimSizes
4430  for (unsigned int i = 0; i < inputDimSize; ++i)
4431  {
4432  splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
4433  }
4434 
4435  if (splitterDimSizes[splitDim] % numSplits != 0)
4436  {
4437  throw ParseException("Number of splits must evenly divide the dimension");
4438  }
4439  splitterDimSizes[splitDim] /= numSplits;
4440 
4441  SplitterDescriptor splitDesc(numSplits, inputDimSize);
4442  for (unsigned int j = 0; j < numSplits; ++j)
4443  {
4444  // Set the size of the views.
4445  for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
4446  {
4447  splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
4448  }
4449  splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
4450  }
4451  if (axisTensorInfo.GetNumElements() == 1)
4452  {
4453  splitDesc.SetAxis(axis);
4454  }
4455  auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
4456  IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
4457 
4458  if (!layer)
4459  {
4460  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4461  operatorIndex, CHECK_LOCATION().AsString()));
4462  }
4463 
4464  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4465  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
4466 
4467  for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4468  {
4469  armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
4470  layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4471  }
4472 
4473  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4474  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4475 }
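
// A worked example (hypothetical shapes, not part of the parser) of the view
// layout computed above: a {4, 6} input split into numSplits = 3 along axis 1.
// Each view keeps the full extent on the non-split dimensions; along the split
// axis the size is 6 / 3 = 2 and the origins advance by that amount:
//
//   view 0: size {4, 2}, origin {0, 0}
//   view 1: size {4, 2}, origin {0, 2}
//   view 2: size {4, 2}, origin {0, 4}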
4476 
4477 unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
4478 {
4479  int numDims = armnn::numeric_cast<int>(numDimsIn);
4480  int v = idx < 0 ? numDims + idx : idx;
4481 
4482  if (v < 0 || v >= numDims)
4483  {
4484  throw ParseException(fmt::format("Unable to compute index {}", CHECK_LOCATION().AsString()));
4485  }
4486 
4487  return static_cast<unsigned int>(v);
4488 }
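
// Usage sketch for ComputeWrappedIndex (hypothetical values): for a rank-4
// tensor, a negative index counts back from the end, so
//
//   ComputeWrappedIndex(-1, 4) == 3
//   ComputeWrappedIndex(-4, 4) == 0
//   ComputeWrappedIndex( 2, 4) == 2
//
// while out-of-range indices such as -5 or 4 throw a ParseException.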
4489 
4490 void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
4491 {
4492  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4493 
4494  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4495  const auto* options = operatorPtr->builtin_options.AsSplitVOptions();
4496 
4497  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4498  CHECK_VALID_SIZE(inputs.size(), 3);
4499 
4500  auto& inputTensor = inputs[0];
4501  auto& splitsTensor = inputs[1];
4502  auto& axisTensor = inputs[2];
4503 
4504  armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
4505  armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
4506  armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
4507 
4508  if (axisTensorInfo.GetNumElements() != 1)
4509  {
4510  throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
4511  CHECK_LOCATION().AsString()));
4512  }
4513 
4514  // Inputs
4515  auto inputDimSize = inputTensorInfo.GetNumDimensions();
4516  if (inputDimSize > MaxNumOfTensorDimensions)
4517  {
4518  throw ParseException(
4519  fmt::format("The number of dimensions: {} for input tensors of the "
4520  "SplitV op cannot be greater than {} {}",
4521  inputTensorInfo.GetNumDimensions(),
4522  MaxNumOfTensorDimensions,
4523  CHECK_LOCATION().AsString()));
4524  }
4525 
4526  // Get split axis
4527  BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
4528  if (axisBufferPtr == nullptr)
4529  {
4530  throw ParseException(
4531  fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4532  CHECK_LOCATION().AsString()));
4533  }
4534 
4535  std::vector<int> axisData(axisTensorInfo.GetNumElements());
4536  ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
4537  int32_t axis = axisData[0];
4538 
4539  auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4540  if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4541  {
4542  // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4543  // E.g. Rank 4 tensor can have axis in range [-4, 3)
4544  // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4545  throw ParseException(
4546  fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4547  axis,
4548  CHECK_LOCATION().AsString()));
4549  }
4550  const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
4551 
4552  // Set split sizes
4553  CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
4554  unsigned int numSplits{0};
4555 
4556  if(options)
4557  {
4558  numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
4559  }
4560  else
4561  {
4562  numSplits = splitsInfo.GetNumElements();
4563  }
4564 
4565  if (numSplits == 0)
4566  {
4567  throw ParseException("SplitV has invalid number of splits");
4568  }
4569 
4570  std::vector<int> splitsData(numSplits);
4571  BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
4572  ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
4573 
4574  unsigned int idx = 0;
4575  int numInferred{0};
4576  unsigned int inferIdx{0};
4577  int splitSum{0};
4578  for (auto split : splitsData)
4579  {
4580  if (split < 0)
4581  {
4582  numInferred++;
4583  inferIdx = idx;
4584  }
4585  else
4586  {
4587  splitSum += split;
4588  }
4589  idx++;
4590  }
4591  // Check for an inferred split size
4592  if (numInferred == 0)
4593  {
4594  if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
4595  {
4596  throw ParseException("SplitV split_sizes do not sum to the size of the input along split_dim.");
4597  }
4598  }
4599  else if (numInferred == 1)
4600  {
4601  splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
4602  }
4603  else
4604  {
4605  throw ParseException("Cannot infer split size for more than one split");
4606  }
4607 
4608  // Output size validation
4609  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4610  CHECK_VALID_SIZE(outputs.size(), numSplits);
4611 
4612  // Set up the ArmNN descriptor
4613  SplitterDescriptor splitDesc(numSplits, inputDimSize);
4614  unsigned int accumSplit = 0;
4615  for (unsigned int j = 0; j < numSplits; ++j)
4616  {
4617  unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
4618 
4619  // Set the size of the views.
4620  for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
4621  {
4622  unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
4623  if (dimIdx == splitDim)
4624  {
4625  dimSize = splitSize;
4626  }
4627  splitDesc.SetViewSize(j, dimIdx, dimSize);
4628  }
4629 
4630  splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
4631  accumSplit += splitSize;
4632  }
4633  splitDesc.SetAxis(axis);
4634 
4635  auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
4636  IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
4637 
4638  if (!layer)
4639  {
4640  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4641  operatorIndex, CHECK_LOCATION().AsString()));
4642  }
4643 
4644  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4645  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4646 
4647  for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4648  {
4649  armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
4650  layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4651  }
4652 
4653  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4654  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4655 }
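
// A worked example (hypothetical values, not part of the parser) of the
// inferred-split handling above: for an input whose split dimension has size
// 10 and splitsData = {2, -1, 3}, the single negative entry is inferred as
// 10 - (2 + 3) = 5, giving view sizes 2, 5 and 3 with origins 0, 2 and 7.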
4656 
4657 void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
4658 {
4659  ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
4660 }
4661 
4662 void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
4663 {
4664  ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
4665 }
4666 
4667 void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
4668 {
4669  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4670  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4671  CHECK_VALID_SIZE(inputs.size(), 2);
4672 
4673  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4674  CHECK_VALID_SIZE(outputs.size(), 1);
4675 
4676  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4677  armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4678  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
4679 
4680  if (axisTensorInfo.GetNumElements() != 1)
4681  {
4682  throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
4683  CHECK_LOCATION().AsString()));
4684  }
4685 
4686  // Check if output tensor type is Signed32 or Signed64
4687  if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
4688  outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
4689  {
4690  throw ParseException(
4691  fmt::format(
4692  "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
4693  CHECK_LOCATION().AsString()));
4694  }
4695 
4696  // Get const axis value from model and set it to descriptor.
4697  BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
4698  if (axisBufferPtr == nullptr)
4699  {
4700  throw ParseException(
4701  fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4702  CHECK_LOCATION().AsString()));
4703  }
4704 
4705  std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
4706  ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
4707  int32_t axis = axisData.front();
4708 
4709  auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4710  if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4711  {
4712  // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4713  // E.g. Rank 4 tensor can have axis in range [-4, 3)
4714  // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4715  throw ParseException(
4716  fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4717  axis,
4718  CHECK_LOCATION().AsString()));
4719  }
4720 
4721  ArgMinMaxDescriptor desc;
4722  desc.m_Axis = axis;
4723  desc.m_Function = argMinMaxFunction;
4724 
4725  // Register an ArgMin/ArgMax layer.
4726  auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
4727  auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4728  IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
4729 
4730  if (!layer)
4731  {
4732  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4733  operatorIndex, CHECK_LOCATION().AsString()));
4734  }
4735 
4736  outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
4737  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4738 
4739  // Register input tensor to the layer.
4740  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4741  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4742 
4743  // Register output tensor to the layer.
4744  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4745  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4746 }
4747 
4748 void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
4749 {
4750  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4751 
4752  TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4753  CHECK_VALID_SIZE(inputs.size(), 2);
4754  TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4755  CHECK_VALID_SIZE(outputs.size(), 1);
4756 
4757  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4758  armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4759  armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
4760 
4761  armnn::GatherDescriptor gatherDescriptor;
4762 
4763  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4764  const auto* options = operatorPtr->builtin_options.AsGatherOptions();
4765  auto axis = options->axis;
4766 
4767  auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
4768 
4769  auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4770  auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
4771  auto outputDimensions = outputTensorInfo.GetNumDimensions();
4772  if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4773  {
4774  throw ParseException(
4775  fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
4776  axis,
4777  inputDimensions, inputDimensions,
4778  CHECK_LOCATION().AsString()));
4779  }
4780  if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
4781  {
4782  throw ParseException(
4783  fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
4784  outputDimensions,
4785  inputDimensions, indicesDimensions,
4786  CHECK_LOCATION().AsString()));
4787  }
4788 
4789  gatherDescriptor.m_Axis = axis;
4790 
4791  IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
4792 
4793  if (!layer)
4794  {
4795  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4796  operatorIndex, CHECK_LOCATION().AsString()));
4797  }
4798 
4799  outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
4800  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4801 
4802  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4803  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4804 
4805  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4806  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4807 }
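
// A worked example (hypothetical shapes) of the Gather rank rule checked
// above, outputRank == inputRank + indicesRank - 1: for a params tensor of
// shape {5, 4}, indices of shape {2, 3} and axis 0, each index selects a {4}
// row, so the output shape is {2, 3, 4} and its rank is 2 + 2 - 1 = 3.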
4808 
4809 void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
4810 {
4811  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4812 
4813  TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4814  CHECK_VALID_SIZE(inputs.size(), 2);
4815  TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4816  CHECK_VALID_SIZE(outputs.size(), 1);
4817 
4818  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4819  armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4820 
4821  auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
4822  IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
4823 
4824  if (!layer)
4825  {
4826  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4827  operatorIndex, CHECK_LOCATION().AsString()));
4828  }
4829 
4830  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
4831  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4832 
4833  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4834  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4835 
4836  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4837  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4838 }
4839 
4840 void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
4841 {
4842  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4843 
4844  TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4845  CHECK_VALID_SIZE(inputs.size(), 1);
4846  TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4847  CHECK_VALID_SIZE(outputs.size(), 1);
4848 
4849  armnn::DepthToSpaceDescriptor descriptor;
4850 
4851  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4852  const auto* options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
4853  auto blockSize = options->block_size;
4854  if (blockSize < 2)
4855  {
4856  throw ParseException(
4857  fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
4858  blockSize,
4859  CHECK_LOCATION().AsString()));
4860  }
4861  descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
4862 
4863  auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
4864  IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
4865 
4866  if (!layer)
4867  {
4868  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4869  operatorIndex, CHECK_LOCATION().AsString()));
4870  }
4871 
4872  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
4873  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4874 
4875  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4876  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4877 
4878  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4879  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4880 }
4881 
4882 void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
4883 {
4884  ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
4885 }
4886 
4887 void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
4888 {
4889  ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
4890 }
4891 
4892 void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
4893 {
4894  ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
4895 }
4896 
4897 void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
4898 {
4899  ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
4900 }
4901 
4902 void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
4903 {
4904  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4905 
4906  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4907  const auto* options = operatorPtr->builtin_options.AsReducerOptions();
4908 
4909  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4910  CHECK_VALID_SIZE(inputs.size(), 2);
4911 
4912  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4913  CHECK_VALID_SIZE(outputs.size(), 1);
4914 
4915  auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
4916 
4917  armnn::TensorInfo inputTensorInfo0 = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4918  armnn::TensorInfo inputTensorInfo1 = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4919 
4920  ReduceDescriptor desc;
4921  BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
4922  // Get const axis value from model and set it to descriptor.
4923  if (axisBufferPtr != nullptr)
4924  {
4925  std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
4926  ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
4927 
4928  // Convert the axis to unsigned int and remove duplicates.
4929  auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
4930  std::set<unsigned int> uniqueAxis;
4931  std::transform(axisData.begin(),
4932  axisData.end(),
4933  std::inserter(uniqueAxis, uniqueAxis.begin()),
4934  [rank](int i)->unsigned int{
4935  return static_cast<uint32_t>(((i + rank) % rank)); });
4936  desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
4937  }
4938  else
4939  {
4940  for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
4941  {
4942  desc.m_vAxis.push_back(i);
4943  }
4944  }
4945 
4946  desc.m_KeepDims = options->keep_dims;
4947  desc.m_ReduceOperation = reduceOperation;
4948 
4949  // Register a new Reduce layer.
4950  IConnectableLayer* layer = m_Network->AddReduceLayer(desc, layerName.c_str());
4951 
4952  armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
4953  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4954 
4955  // Register input tensor to the layer.
4956  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4957  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4958 
4959  // Register output tensor to the layer.
4960  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4961  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4962 }
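
// A worked example (hypothetical values) of the axis normalisation above.
// For a rank-4 input and axis data {-1, 3, 0}, each entry is mapped through
// (i + rank) % rank and duplicates are removed by the std::set:
//
//   -1 -> (-1 + 4) % 4 == 3
//    3 -> ( 3 + 4) % 4 == 3   (duplicate, discarded)
//    0 -> ( 0 + 4) % 4 == 0
//
// so desc.m_vAxis ends up as {0, 3}.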
4963 
4964 void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
4965 {
4966  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4967 
4968  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4969  CHECK_VALID_SIZE(inputs.size(), 1);
4970 
4971  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4972  CHECK_VALID_SIZE(outputs.size(), 1);
4973 
4974  auto layerName = "LRN:{}:{}";
4975  std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4976 
4977  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4978 
4979  const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4980  const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
4981 
4982  armnn::NormalizationDescriptor descriptor;
4983  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4984  descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
4985  descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
4986  descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
4987  descriptor.m_K = options->bias;
4988  descriptor.m_Alpha = options->alpha;
4989  descriptor.m_Beta = options->beta;
4990 
4991  // ArmNN expects normSize to be the full size of the normalization
4992  // window rather than the radius as in TfLite.
4993  descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
4994 
4995  IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
4996 
4997  if (!layer)
4998  {
4999  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5000  operatorIndex, CHECK_LOCATION().AsString()));
5001  }
5002 
5003  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
5004  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5005 
5006  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5007  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
5008 
5009  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5010  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5011 }
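
// A worked example (hypothetical value) of the radius-to-window conversion
// above: a TfLite radius of 2 yields m_NormSize = 1 + (2 * 2) = 5, i.e. the
// normalization window covers the element itself plus two neighbours on each
// side.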
5012 
5013 void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
5014 {
5015  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
5016 }
5017 
5018 void TfLiteParserImpl::ParseCeil(size_t subgraphIndex, size_t operatorIndex)
5019 {
5020  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Ceil);
5021 }
5022 
5023 void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
5024 {
5025  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
5026 }
5027 
5028 void TfLiteParserImpl::ParseLog(size_t subgraphIndex, size_t operatorIndex)
5029 {
5030  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Log);
5031 }
5032 
5033 void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
5034 {
5035  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
5036 }
5037 
5038 void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
5039 {
5040  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
5041 }
5042 
5043 void TfLiteParserImpl::ParsePower(size_t subgraphIndex, size_t operatorIndex)
5044 {
5045  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5046 
5047  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5048  CHECK_VALID_SIZE(inputs.size(), 2);
5049 
5050  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5051  CHECK_VALID_SIZE(outputs.size(), 1);
5052 
5053  auto layerName = fmt::format("Power:{}:{}", subgraphIndex, operatorIndex);
5054 
5055  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5056  TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
5057  CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
5058 
5059  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Power, layerName.c_str());
5060 
5061  if (!layer)
5062  {
5063  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5064  operatorIndex, CHECK_LOCATION().AsString()));
5065  }
5066 
5067  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
5068  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5069  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5070 
5071  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5072  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5073 
5074  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5075  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5076 }
5077 
5078 void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
5079 {
5080  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
5081 }
5082 
5083 void TfLiteParserImpl::ParseSin(size_t subgraphIndex, size_t operatorIndex)
5084 {
5085  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sin);
5086 }
5087 
5088 void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex)
5089 {
5090  ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt);
5091 }
5092 
5093 void TfLiteParserImpl::ParseSquare(size_t subgraphIndex, size_t operatorIndex)
5094 {
5095  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5096 
5097  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5098  CHECK_VALID_SIZE(inputs.size(), 1);
5099 
5100  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5101  CHECK_VALID_SIZE(outputs.size(), 1);
5102 
5103  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5104 
5105  auto layerName = fmt::format("Square:{}:{}", subgraphIndex, operatorIndex);
5106  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
5107  ARMNN_ASSERT(layer != nullptr);
5108 
5109  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 0});
5110  CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5111  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5112 
5113  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5114  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[0]});
5115 
5116  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5117  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5118 }
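
// Note on the lowering above: TfLite SQUARE has no one-to-one ArmNN layer, so
// it is emitted as an elementwise Mul whose two input slots are registered
// against the same tensor index, computing x * x. A minimal sketch
// (hypothetical names, outside the parser):
//
//   IConnectableLayer* mul = network->AddElementwiseBinaryLayer(BinaryOperation::Mul, "Square");
//   input->GetOutputSlot(0).Connect(mul->GetInputSlot(0));
//   input->GetOutputSlot(0).Connect(mul->GetInputSlot(1));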
5119 
5120 void TfLiteParserImpl::ParseSquaredDifference(size_t subgraphIndex, size_t operatorIndex)
5121 {
5122  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5123 
5124  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5125  CHECK_VALID_SIZE(inputs.size(), 2);
5126 
5127  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5128  CHECK_VALID_SIZE(outputs.size(), 1);
5129 
5130  auto layerName = fmt::format("SquaredDifference:{}:{}", subgraphIndex, operatorIndex);
5131 
5132  TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5133  TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
5134 
5135  IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::SqDiff, layerName.c_str());
5136 
5137  if (!layer)
5138  {
5139  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5140  operatorIndex, CHECK_LOCATION().AsString()));
5141  }
5142 
5143  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
5144  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5145 
5146  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5147  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5148 
5149  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5150  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5151 }
5152 
5153 void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
5154 {
5155  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5156 
5157  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5158  CHECK_VALID_SIZE(inputs.size(), 1);
5159 
5160  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5161  CHECK_VALID_SIZE(outputs.size(), 1);
5162 
5163  std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
5164  std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
5165 
5166  ElementwiseUnaryDescriptor desc;
5167  desc.m_Operation = unaryOperation;
5168  IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
5169 
5170  if (!layer)
5171  {
5172  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5173  operatorIndex, CHECK_LOCATION().AsString()));
5174  }
5175 
5176  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
5177  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5178 
5179  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5180  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
5181 
5182  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5183  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
5184 }
5185 
5186 void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
5187 {
5188  ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
5189 }
5190 
5191 void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
5192 {
5193  ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
5194 }
5195 
5196 void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
5197 {
5198  ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
5199 }
5200 
5201 void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
5202 {
5203  ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
5204 }
5205 
5206 void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
5207 {
5208  ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
5209 }
5210 
5211 void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
5212 {
5213  ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
5214 }
5215 
5216 void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
5217  ComparisonOperation comparisonOperation)
5218 {
5219  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5220 
5221  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5222  CHECK_VALID_SIZE(inputs.size(), 2);
5223 
5224  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5225  CHECK_VALID_SIZE(outputs.size(), 1);
5226 
5227  auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
5228  std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
5229 
5230  armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5231  armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
5232  CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");
5233 
5234  ComparisonDescriptor desc;
5235  desc.m_Operation = comparisonOperation;
5236  IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
5237 
5238  if (!layer)
5239  {
5240  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5241  operatorIndex, CHECK_LOCATION().AsString()));
5242  }
5243 
5244  TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
5245  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5246 
5247  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5248  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5249 
5250  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5251  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5252 }
5253 
5254 armnn::IConnectableLayer* TfLiteParserImpl::AddReshapeLayer(armnn::IConnectableLayer* layer,
5255  unsigned int outputSlot,
5256  std::string reshapeLayerName,
5257  armnn::TensorInfo outputShape)
5258 {
5259  ReshapeDescriptor desc;
5260  desc.m_TargetShape = outputShape.GetShape();
5261 
5262  IConnectableLayer* reshapeLayer =
5263  m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
5264 
5265  auto & prevOutputSlot = layer->GetOutputSlot(outputSlot);
5266  prevOutputSlot.Connect(reshapeLayer->GetInputSlot(0));
5267  reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputShape);
5268  return reshapeLayer;
5269 }
5270 
5271 armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
5272  unsigned int outputSlot,
5273  tflite::ActivationFunctionType activationType)
5274 {
5275  ActivationDescriptor activationDesc;
5276  std::string layerName = prevLayer->GetName();
5277 
5278  switch(activationType)
5279  {
5280  case tflite::ActivationFunctionType_NONE:
5281  {
5282  // this is a no-op: return previous layer
5283  return prevLayer;
5284  }
5285  case tflite::ActivationFunctionType_RELU:
5286  {
5287  activationDesc.m_Function = ActivationFunction::ReLu;
5288  layerName += ":RELU";
5289  break;
5290  }
5291  case tflite::ActivationFunctionType_RELU6:
5292  {
5293  activationDesc.m_Function = ActivationFunction::BoundedReLu;
5294  activationDesc.m_A = 6.0f;
5295  activationDesc.m_B = 0.0f;
5296  layerName += ":RELU6";
5297  break;
5298  }
5299  case tflite::ActivationFunctionType_TANH:
5300  {
5301  activationDesc.m_Function = ActivationFunction::TanH;
5302  activationDesc.m_A = 1.0f;
5303  activationDesc.m_B = 1.0f;
5304  layerName += ":TANH";
5305  break;
5306  }
5307 
5308  // These are listed here as a reminder of the other activation types we could support.
5309  case tflite::ActivationFunctionType_RELU_N1_TO_1:
5310  case tflite::ActivationFunctionType_SIGN_BIT:
5311  default:
5312  {
5313  throw ParseException(
5314  fmt::format("TfLite parser doesn't support fused activation: "
5315  "{}/{} {} ",
5316  activationType,
5317  tflite::EnumNameActivationFunctionType(activationType),
5318  CHECK_LOCATION().AsString()));
5319 
5320  }
5321  }
5322 
5323  IConnectableLayer* activationLayer =
5324  m_Network->AddActivationLayer(activationDesc, layerName.c_str());
5325 
5326  auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
5327  prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
5328  activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
5329  return activationLayer;
5330 }
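
// Summary of the fused-activation mapping above (values taken from the cases):
//
//   NONE  -> no layer inserted; the previous layer is returned unchanged
//   RELU  -> ActivationFunction::ReLu
//   RELU6 -> ActivationFunction::BoundedReLu, m_A = 6.0f (upper), m_B = 0.0f (lower)
//   TANH  -> ActivationFunction::TanH, m_A = 1.0f, m_B = 1.0f
//
// Anything else (RELU_N1_TO_1, SIGN_BIT, ...) currently throws a ParseException.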
5331 
5332 armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
5333  unsigned int outputSlot)
5334 {
5335 
5336  auto& prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
5337  DataType dataType = prevOutputSlot.GetTensorInfo().GetDataType();
5338 
5339  if (dataType == DataType::Signed32)
5340  {
5341  return prevLayer;
5342  }
5343 
5344  std::string layerName = prevLayer->GetName();
5345  IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
5346 
5347  prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
5348  floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
5349 
5350  return floorLayer;
5351 }
5352 
5353 TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
5354 {
5355  if (fileName == nullptr)
5356  {
5357  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
5358  CHECK_LOCATION().AsString()));
5359  }
5360  std::error_code errorCode;
5361  fs::path pathToFile(fileName);
5362  if (!fs::exists(pathToFile, errorCode))
5363  {
5364  // fmt::format() could not be used here (format error).
5365  std::stringstream msg;
5366  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
5367  << " " << CHECK_LOCATION().AsString();
5368  throw FileNotFoundException(msg.str());
5369  }
5370  if (!fs::is_regular_file(pathToFile))
5371  {
5372  // Exclude non regular files.
5373  throw InvalidArgumentException(fmt::format("File \"{}\" is not a regular file and cannot be loaded.",
5374  pathToFile.c_str()));
5375  }
5376 
5377  std::ifstream file(fileName, std::ios::binary);
5378  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
5379  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
5380  fileContent.size());
5381 }
5382 
5383 TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t* binaryContent, size_t len)
5384 {
5385  if (binaryContent == nullptr)
5386  {
5387  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
5388  CHECK_LOCATION().AsString()));
5389  }
5390  flatbuffers::Verifier verifier(binaryContent, len);
5391  if (verifier.VerifyBuffer<tflite::Model>() == false)
5392  {
5393  throw ParseException(
5394  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
5395  "flatbuffers format. size:{} {}",
5396  len,
5397  CHECK_LOCATION().AsString()));
5398  }
5399  return tflite::UnPackModel(binaryContent);
5400 }
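
// A minimal standalone sketch of the verify-then-unpack pattern used above,
// assuming the flatbuffers and generated schema headers this file already
// includes, and a hypothetical 'content' buffer read from a .tflite file:
//
//   std::vector<uint8_t> content = ...; // raw model bytes
//   flatbuffers::Verifier verifier(content.data(), content.size());
//   if (!verifier.VerifyBuffer<tflite::Model>())
//   {
//       // not a valid TfLite flatbuffer: reject before unpacking
//   }
//   std::unique_ptr<tflite::ModelT> model = tflite::UnPackModel(content.data());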
5401 
5402 TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr& model,
5403  size_t subgraphIndex,
5404  size_t operatorIndex)
5405 {
5406  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5407 
5408  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5409  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5410 
5411  size_t inputCount = operatorPtr->inputs.size();
5412  TensorRawPtrVector result;
5413  for (size_t i = 0; i < inputCount; ++i)
5414  {
5415  // If the input location is -1 then assume input is turned off.
5416  if (operatorPtr->inputs[i] == -1)
5417  {
5418  continue;
5419  }
5420  else
5421  {
5422  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
5423  result.push_back(subgraphPtr->tensors[inputId].get());
5424  }
5425  }
5426  return result;
5427 }
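
// Example usage (illustrative): collecting the input tensors of operator 0 in
// subgraph 0; optional inputs encoded as -1 are silently skipped, so the result
// may hold fewer entries than operatorPtr->inputs:
//   TensorRawPtrVector inputTensors = TfLiteParserImpl::GetInputs(m_Model, 0, 0);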
5428 
5429 TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr& model,
5430  size_t subgraphIndex,
5431  size_t operatorIndex)
5432 {
5433  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5434 
5435  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5436  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5437 
5438  size_t outputCount = operatorPtr->outputs.size();
5439  TensorRawPtrVector result(outputCount);
5440  for (size_t i = 0; i < outputCount; ++i)
5441  {
5442  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
5443  CHECK_TENSOR(model, subgraphIndex, outputId);
5444  result[i] = subgraphPtr->tensors[outputId].get();
5445  }
5446  return result;
5447 }
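
// Example usage (illustrative): unlike GetInputs, every output index is
// validated with CHECK_TENSOR, as operators cannot have optional outputs:
//   TensorRawPtrVector outputTensors = TfLiteParserImpl::GetOutputs(m_Model, 0, 0);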
5448 
5449 TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr& model,
5450  size_t subgraphIndex)
5451 {
5452  CHECK_SUBGRAPH(model, subgraphIndex);
5453  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5454 
5455  size_t inputCount = subgraphPtr->inputs.size();
5456  TensorIdRawPtrVector result(inputCount);
5457  for (size_t i = 0; i < inputCount; ++i)
5458  {
5459  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
5460  CHECK_TENSOR(model, subgraphIndex, inputId);
5461  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
5462  }
5463  return result;
5464 }
5465 
5466 TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr& model,
5467  size_t subgraphIndex)
5468 {
5469  CHECK_SUBGRAPH(model, subgraphIndex);
5470  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5471 
5472  size_t outputCount = subgraphPtr->outputs.size();
5473  TensorIdRawPtrVector result(outputCount);
5474  for (size_t i = 0; i < outputCount; ++i)
5475  {
5476  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
5477  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
5478  }
5479  return result;
5480 }
5481 
5482 std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
5483  size_t subgraphIndex,
5484  size_t operatorIndex)
5485 {
5486  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5487  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5488  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5489  return operatorPtr->inputs;
5490 }
5491 
5492 std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
5493  size_t subgraphIndex,
5494  size_t operatorIndex)
5495 {
5496  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5497  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5498  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5499  return operatorPtr->outputs;
5500 }
5501 
5502 void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
5503  size_t operatorIndex,
5504  IConnectableLayer* layer,
5505  const std::vector<unsigned int>& tensorIndexes,
5506  unsigned int startingSlotIndex)
5507 {
5508  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5509 
5510  if (!layer)
5511  {
5512  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5513  operatorIndex, CHECK_LOCATION().AsString()));
5514  }
5515 
5516  if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
5517  {
5518  throw ParseException(
5519  fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
5520  " for subgraph:{} operator index:{} {}",
5521  tensorIndexes.size(),
5522  layer->GetNumInputSlots(),
5523  subgraphIndex,
5524  operatorIndex,
5525  CHECK_LOCATION().AsString()));
5526  }
5527 
5528  for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
5529  {
5530  unsigned int tensorIndex = tensorIndexes[index];
5531  armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
5532  RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
5533  }
5534 }
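
// Example usage (illustrative, not part of the original file; tensor indices
// 2 and 3 are hypothetical): wiring two tensor indices to a freshly added layer
// starting at input slot 0; each index is recorded via RegisterConsumerOfTensor
// so its producer can be connected later:
//   RegisterInputSlots(subgraphIndex, operatorIndex, layer, {2u, 3u}, 0);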
5535 
5536 void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
5537  size_t operatorIndex,
5538  IConnectableLayer* layer,
5539  const std::vector<unsigned int>& tensorIndexes)
5540 {
5541  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5542 
5543  if (!layer)
5544  {
5545  throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5546  operatorIndex, CHECK_LOCATION().AsString()));
5547  }
5548 
5549  if (tensorIndexes.size() != layer->GetNumOutputSlots())
5550  {
5551  throw ParseException(
5552  fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
5553  " for subgraph:{} operator index:{} {}",
5554  tensorIndexes.size(),
5555  layer->GetNumOutputSlots(),
5556  subgraphIndex,
5557  operatorIndex,
5558  CHECK_LOCATION().AsString()));
5559  }
5560 
5561  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
5562  {
5563  unsigned int tensorIndex = tensorIndexes[slotIndex];
5564  armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
5565  RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
5566  }
5567 }
5568 
5569 void TfLiteParserImpl::SetupInputLayerTensorInfos(size_t subgraphIndex)
5570 {
5571  CHECK_SUBGRAPH(m_Model, subgraphIndex);
5572 
5573  auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
5574  for (auto const& tensorIdAndPtr : inputs)
5575  {
5576  auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
5577  m_TensorInfos.insert({tensorIdAndPtr.first, tensorInfo});
5578  }
5579 }
5580 
5581 void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
5582 {
5583  CHECK_SUBGRAPH(m_Model, subgraphIndex);
5584 
5585  auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
5586  for (auto const& tensorIdAndPtr : inputs)
5587  {
5588  auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
5589  IConnectableLayer* layer =
5590  m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
5591 
5592  auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
5593  layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
5594 
5595  RegisterOutputSlots(subgraphIndex,
5596  VIRTUAL_OPERATOR_ID,
5597  layer,
5598  { static_cast<uint32_t>(tensorIdAndPtr.first) });
5599  }
5600 }
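
// Note (added for clarity, not in the original file): GenerateLayerBindingId
// combines the subgraph index and tensor index, so the id created here is the
// same one later returned by GetNetworkInputBindingInfo for that tensor name.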
5601 
5602 void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
5603 {
5604  CHECK_SUBGRAPH(m_Model, subgraphIndex);
5605 
5606  auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
5607  for (auto const& tensorIdAndPtr : outputs)
5608  {
5609  auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
5610  IConnectableLayer* layer =
5611  m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
5612 
5613  RegisterInputSlots(subgraphIndex,
5614  VIRTUAL_OPERATOR_ID,
5615  layer,
5616  { static_cast<uint32_t>(tensorIdAndPtr.first) });
5617  }
5618 }
5619 
5620 void TfLiteParserImpl::SetupConstantLayerTensorInfos(size_t subgraph)
5621 {
5622  CHECK_SUBGRAPH(m_Model, subgraph);
5623 
5624  const auto & subgraphPtr = m_Model->subgraphs[subgraph];
5625  for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
5626  {
5627  for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
5628  {
5629  if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
5630  m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
5631  {
5632  TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
5633 
5634  armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5635 
5636  m_TensorInfos.insert({tensorIndex, tensorInfo});
5637  }
5638  }
5639  }
5640 }
5641 
5642 void TfLiteParserImpl::SetupConstantLayers(size_t subgraph)
5643 {
5644  CHECK_SUBGRAPH(m_Model, subgraph);
5645 
5646  const auto & subgraphPtr = m_Model->subgraphs[subgraph];
5647  for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
5648  {
5649  for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
5650  {
5651  if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
5652  m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
5653  {
5654  TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
5655 
5656  if (IsConstTensor(tensorPtr))
5657  {
5658  armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5659  armnn::DataType dataType = tensorInfo.GetDataType();
5660 
5661  if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
5662  != m_ConstantsToDequantize.end())
5663  {
5664  dataType = DataType::Float32;
5665  }
5666  auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo, dataType);
5667 
5668  std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
5669  IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
5670 
5671  layer->GetOutputSlot(0).SetTensorInfo(tensorAndData.first.GetInfo());
5672  RegisterOutputSlots(subgraphIndex,
5673  VIRTUAL_OPERATOR_ID,
5674  layer,
5675  { tensorIndex });
5676  }
5677  else if (ShouldConstantTensorBeCreated(tensorIndex))
5678  {
5679  armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5680  armnn::DataType dataType = tensorInfo.GetDataType();
5681 
5682  if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
5683  != m_ConstantsToDequantize.end())
5684  {
5685  dataType = DataType::Float32;
5686  }
5687  // Make sure isConstant flag is set.
5688  tensorInfo.SetConstant();
5689  tensorInfo.SetDataType(dataType);
5690 
5691  auto tensorAndData = ConstTensor(tensorInfo, std::vector<uint8_t>(tensorInfo.GetNumBytes()));
5692 
5693  std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
5694  IConnectableLayer* layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
5695 
5696  layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
5697  RegisterOutputSlots(subgraphIndex,
5698  VIRTUAL_OPERATOR_ID,
5699  layer,
5700  {tensorIndex});
5701  }
5702  else
5703  {
5704  throw ParseException(
5705  fmt::format("Invalid Tensor: Tensor should be constant. {}",
5706  CHECK_LOCATION().AsString()));
5707  }
5708  }
5709  }
5710  }
5711 }
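
// Note (added for clarity, not in the original file): a tensor reaches this
// point when it has consumers but no producing operator. It is materialised as
// a ConstantLayer either from its flatbuffer data (IsConstTensor) or as a
// zero-filled placeholder (ShouldConstantTensorBeCreated); any other case is a
// malformed graph and throws.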
5712 
5713 // example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
5714 TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
5715 {
5716  CHECK_BUFFER(model, bufferIndex);
5717  return model->buffers[bufferIndex].get();
5718 }
5719 
5720 template<typename T>
5721 std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
5722 TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
5723  TfLiteParserImpl::TensorRawPtr tensorPtr,
5724  armnn::TensorInfo& tensorInfo,
5725  armnn::Optional<armnn::PermutationVector&> permutationVector)
5726 {
5727  // Make sure isConstant flag is set.
5728  tensorInfo.SetConstant();
5729 
5730  auto constData = CreateConstTensorImpl<T>(bufferPtr,
5731  tensorPtr,
5732  tensorInfo,
5733  permutationVector);
5734  TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
5735  return std::make_pair(constData.first, std::move(storage));
5736 }
5737 
5738 bool TfLiteParserImpl::ShouldConstantTensorBeCreated(unsigned int tensorIndex)
5739 {
5740  // If the TensorIndex appears in the list of ConstantsToBeCreated then return true
5741  return (std::find(m_ConstantsToBeCreated.begin(), m_ConstantsToBeCreated.end(), tensorIndex)
5742  != m_ConstantsToBeCreated.end());
5743 }
5744 
5745 bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
5746 {
5747  CHECK_TENSOR_PTR(tensorPtr);
5748  bool isConst = true;
5749 
5750  auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
5751  if (buffer->data.size() == 0)
5752  {
5753  isConst = false;
5754  }
5755 
5756  return isConst;
5757 }
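
// Example usage (illustrative; "inputs" is a hypothetical TensorRawPtrVector):
// a tensor counts as constant exactly when its backing buffer carries data:
//   if (IsConstTensor(inputs[1])) { /* weights are baked into the model */ }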
5758 
5759 std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
5760 TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
5761  armnn::TensorInfo& tensorInfo,
5762  armnn::Optional<armnn::PermutationVector&> permutationVector)
5763 {
5764  CHECK_TENSOR_PTR(tensorPtr);
5765  auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5766  CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5767 
5768  // Make sure isConstant flag is set.
5769  tensorInfo.SetConstant();
5770 
5771  switch (tensorInfo.GetDataType())
5772  {
5773  case armnn::DataType::Float32:
5774  return CreateConstTensorAndStoreData<float>(bufferPtr,
5775  tensorPtr,
5776  tensorInfo,
5777  permutationVector);
5778  case armnn::DataType::QAsymmU8:
5779  return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
5780  tensorPtr,
5781  tensorInfo,
5782  permutationVector);
5783  case armnn::DataType::QSymmS8:
5784  return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5785  tensorPtr,
5786  tensorInfo,
5787  permutationVector);
5788  case armnn::DataType::QAsymmS8:
5789  return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5790  tensorPtr,
5791  tensorInfo,
5792  permutationVector);
5793  case armnn::DataType::Signed32:
5794  return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
5795  tensorPtr,
5796  tensorInfo,
5797  permutationVector);
5798  default:
5799  {
5800  std::stringstream errString;
5801  errString << "Unexpected datatype when creating const tensor: "
5802  << armnn::GetDataTypeName(tensorInfo.GetDataType())
5803  << " shape:" << tensorInfo.GetShape()
5804  << CHECK_LOCATION().AsString();
5805  throw ParseException(errString.str());
5806  }
5807  }
5808 }
5809 
5810 armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5811  armnn::TensorInfo& tensorInfo)
5812 {
5813  CHECK_TENSOR_PTR(tensorPtr);
5814  auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5815  CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5816 
5817  // Make sure isConstant flag is set.
5818  tensorInfo.SetConstant();
5819 
5820  return ConstTensor(tensorInfo, bufferPtr->data.data());
5821 }
5822 
5823 std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
5824 TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5825  armnn::TensorInfo& tensorInfo,
5826  armnn::DataType inputDataType)
5827 {
5828  CHECK_TENSOR_PTR(tensorPtr);
5829  auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5830  CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5831 
5832  // Make sure isConstant flag is set.
5833  tensorInfo.SetConstant();
5834 
5835  if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5836  {
5837  try
5838  {
5839  TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5840  std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5841  return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
5842  }
5843  catch (InvalidArgumentException&)
5844  {
5845  throw ParseException(
5846  fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5847  GetDataTypeName(DataType::Float32),
5848  GetDataTypeName(tensorInfo.GetDataType()),
5849  CHECK_LOCATION().AsString()));
5850  }
5851  }
5852  else
5853  {
5854  return std::make_pair(ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5855  }
5856 }
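
// Note (added for clarity, not in the original file): this overload dequantizes
// stored weights to Float32 via armnnUtils::ToFloatArray when the consuming
// input is Float32 but the weights are not; the returned unique_ptr owns the
// converted data and must outlive the ConstTensor that points into it.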
5857 
5858 std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
5859 TfLiteParserImpl::CreateConstTensorPtr(TensorRawPtr tensorPtr, armnn::TensorInfo& inputTensorInfo)
5860 {
5861  CHECK_TENSOR_PTR(tensorPtr);
5862  armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5863  auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5864  CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5865 
5866  // Make sure isConstant flag is set.
5867  tensorInfo.SetConstant();
5868 
5869  if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5870  {
5871  try
5872  {
5873  TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5874  std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5875  return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
5876  }
5877  catch (InvalidArgumentException&)
5878  {
5879  throw ParseException(
5880  fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5881  GetDataTypeName(DataType::Float32),
5882  GetDataTypeName(tensorInfo.GetDataType()),
5883  CHECK_LOCATION().AsString()));
5884  }
5885  }
5886  else
5887  {
5888  return std::make_pair(new ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5889  }
5890 }
5891 
5892 BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
5893  const std::string& name) const
5894 {
5895  CHECK_SUBGRAPH(m_Model, subgraphId);
5896  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5897  for (auto const& input : inputs)
5898  {
5899  if (input.second->name == name)
5900  {
5901  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
5902  auto inputTensorInfo = ToTensorInfo(input.second);
5903  // Input tensors are always treated as constant tensors during network execution.
5904  inputTensorInfo.SetConstant(true);
5905  return std::make_pair(bindingId, inputTensorInfo);
5906  }
5907  }
5908 
5909  std::stringstream bindings;
5910  for (auto const& input : inputs)
5911  {
5912  bindings << "'" << input.second->name << "' ";
5913  }
5914 
5915  throw ParseException(
5916  fmt::format("No input binding found for subgraph:{} and name:{}. "
5917  "Possible inputs are: [{}] {}",
5918  subgraphId,
5919  name,
5920  bindings.str(),
5921  CHECK_LOCATION().AsString()));
5922 }
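
// Example usage (illustrative; the tensor name is hypothetical):
//   BindingPointInfo info = parser.GetNetworkInputBindingInfo(0, "input_tensor");
//   armnn::LayerBindingId bindingId = info.first;
//   armnn::TensorInfo inputInfo = info.second;  // marked constant, see above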
5923 
5924 BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
5925  const std::string& name) const
5926 {
5927  CHECK_SUBGRAPH(m_Model, subgraphId);
5928  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5929  for (unsigned int i = 0; i < outputs.size(); ++i)
5930  {
5931  auto const output = outputs[i];
5932  if (output.second->name == name)
5933  {
5934  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
5935  std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
5936  m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
5937  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
5938  }
5939  }
5940 
5941  std::stringstream bindings;
5942  for (auto const& output : outputs)
5943  {
5944  bindings << "'" << output.second->name << "' ";
5945  }
5946 
5947  throw ParseException(
5948  fmt::format("No output binding found for subgraph:{} and name:{}. "
5949  "Possible outputs are: [{}] {}",
5950  subgraphId,
5951  name,
5952  bindings.str(),
5953  CHECK_LOCATION().AsString()));
5954 }
5955 
5956 size_t TfLiteParserImpl::GetSubgraphCount() const
5957 {
5958  return m_Model->subgraphs.size();
5959 }
5960 
5961 std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
5962 {
5963  CHECK_SUBGRAPH(m_Model, subgraphId);
5964  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5965  std::vector<std::string> result;
5966  result.reserve(inputs.size());
5967  for (auto const& input : inputs)
5968  {
5969  result.push_back(input.second->name);
5970  }
5971  return result;
5972 }
5973 
5974 std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
5975 {
5976  CHECK_SUBGRAPH(m_Model, subgraphId);
5977  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5978  std::vector<std::string> result;
5979  result.reserve(outputs.size());
5980  for (auto const& output : outputs)
5981  {
5982  result.push_back(output.second->name);
5983  }
5984  return result;
5985 }
5986 
5987 const std::string TfLiteParserImpl::GetVersion()
5988 {
5989  return TFLITE_PARSER_VERSION;
5990 }
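
// Example usage (illustrative): logging the X.Y.Z version baked in from
// Version.hpp:
//   ARMNN_LOG(info) << "TfLite parser version: " << TfLiteParserImpl::GetVersion();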
5991 
5992 TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]>&& data)
5993 : m_FloatData(std::move(data))
5994 , m_Uint8Data(nullptr)
5995 , m_Int8Data(nullptr)
5996 , m_Int32Data(nullptr)
5997 {
5998 }
5999 
6000 TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]>&& data)
6001 : m_FloatData(nullptr)
6002 , m_Uint8Data(std::move(data))
6003 , m_Int8Data(nullptr)
6004 , m_Int32Data(nullptr)
6005 {
6006 }
6007 
6008 TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]>&& data)
6009 : m_FloatData(nullptr)
6010 , m_Uint8Data(nullptr)
6011 , m_Int8Data(std::move(data))
6012 , m_Int32Data(nullptr)
6013 {
6014 }
6015 
6016 TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]>&& data)
6017 : m_FloatData(nullptr)
6018 , m_Uint8Data(nullptr)
6019 , m_Int8Data(nullptr)
6020 , m_Int32Data(std::move(data))
6021 {
6022 }
6023 
6024 } // armnnTfLiteParser
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:570
armnn::ArgMinMaxFunction::Max
@ Max
armnn::INetworkPtr
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:339
armnnTfLiteParser::TfLiteParserImpl::GetInputs
static TensorRawPtrVector GetInputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
Definition: TfLiteParser.cpp:5402
armnn::LstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
armnnTfLiteParser::TfLiteParserImpl::GetOutputs
static TensorRawPtrVector GetOutputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
Definition: TfLiteParser.cpp:5429
armnn::DataType::Boolean
@ Boolean
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:530
armnn::Pooling2dDescriptor::m_PaddingMethod
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
Definition: Descriptors.hpp:425
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::TensorInfo::GetNumElements
unsigned int GetNumElements() const
Definition: Tensor.hpp:198
armnn::LstmInputParams::m_OutputLayerNormWeights
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
armnn::DetectionPostProcessDescriptor::m_NmsScoreThreshold
float m_NmsScoreThreshold
NMS score threshold.
Definition: Descriptors.hpp:751
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1469
armnnDeserializer::TensorRawPtr
const armnnSerializer::TensorInfo * TensorRawPtr
Definition: Deserializer.hpp:20
CHECK_BUFFER_SIZE
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID)
Definition: TfLiteParser.cpp:322
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::DetectionPostProcessDescriptor::m_ScaleX
float m_ScaleX
Center size encoding scale x.
Definition: Descriptors.hpp:759
armnnTfLiteParser::TfLiteParserImpl::TensorRawPtrVector
std::vector< TensorRawPtr > TensorRawPtrVector
Definition: TfLiteParser.hpp:36
armnn::ComparisonOperation::LessOrEqual
@ LessOrEqual
armnnDeserializer::CheckShape
bool CheckShape(const armnn::TensorShape &actual, const std::vector< uint32_t > &expected)
Definition: Deserializer.cpp:188
armnn::TensorInfo::GetNumBytes
unsigned int GetNumBytes() const
Definition: Tensor.cpp:427
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1477
armnnTfLiteParser::TfLiteParserImpl::TensorRawPtr
const tflite::TensorT * TensorRawPtr
Definition: TfLiteParser.hpp:35
armnnTfLiteParser::TfLiteParserImpl::CreateNetworkFromBinaryFile
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile)
Create the network from a flatbuffers binary file on disk.
Definition: TfLiteParser.cpp:952
armnn::Optional
Definition: Optional.hpp:270
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::LstmDescriptor::m_TimeMajor
bool m_TimeMajor
Enable/disable time major.
Definition: Descriptors.hpp:1154
armnn::ResizeMethod
ResizeMethod
Definition: Types.hpp:166
Descriptors.hpp
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1071
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:528
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:1009
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:708
armnn::Pooling2dDescriptor::m_PoolHeight
uint32_t m_PoolHeight
Pooling height value.
Definition: Descriptors.hpp:417
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1342
armnn::LstmInputParams::m_ProjectionBias
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
armnn::DetectionPostProcessDescriptor::m_ScaleY
float m_ScaleY
Center size encoding scale y.
Definition: Descriptors.hpp:761
armnn::DetectionPostProcessDescriptor::m_MaxDetections
uint32_t m_MaxDetections
Maximum numbers of detections.
Definition: Descriptors.hpp:745
armnn::LstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
armnn::DataLayout::NHWC
@ NHWC
armnnTfLiteParser::TfLiteParserImpl::CreateNetworkFromBinary
armnn::INetworkPtr CreateNetworkFromBinary(const std::vector< uint8_t > &binaryContent)
Create the network from a flatbuffers binary.
Definition: TfLiteParser.cpp:959
armnn::LstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
armnn::Convolution3dDescriptor::m_PadFront
uint32_t m_PadFront
Padding front value in the depth dimension.
Definition: Descriptors.hpp:637
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::IConnectableLayer::GetName
virtual const char * GetName() const =0
Returns the name of the layer.
armnnTfLiteParser::TfLiteParserImpl::SubgraphPtr
std::unique_ptr< tflite::SubGraphT > SubgraphPtr
Definition: TfLiteParser.hpp:31
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn::CheckLocation::m_Function
const char * m_Function
Definition: Exceptions.hpp:16
armnn::StridedSliceDescriptor::m_EllipsisMask
int32_t m_EllipsisMask
Ellipsis mask value.
Definition: Descriptors.hpp:1357
armnnUtils::GetUnsignedAxis
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
Definition: TensorUtils.cpp:236
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
TypesUtils.hpp
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:710
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:824
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NormalizationAlgorithmMethod::LocalBrightness
@ LocalBrightness
Krichevsky 2012: Local Brightness Normalization.
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:801
armnnTfLiteParser::TfLiteParserImpl::GetSubgraphOutputs
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
Definition: TfLiteParser.cpp:5466
armnn::GetDataTypeName
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:233
armnn::TensorInfo::SetDataType
void SetDataType(DataType type)
Definition: Tensor.hpp:201
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::Pooling2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:421
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:197
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:898
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::DataType::Float32
@ Float32
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1014
Version.hpp
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:692
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:576
armnn::Pooling2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:411
armnnTfLiteParser::TfLiteParserImpl::GetSubgraphInputTensorNames
std::vector< std::string > GetSubgraphInputTensorNames(size_t subgraphId) const
Return the input tensor names for a given subgraph.
Definition: TfLiteParser.cpp:5961
armnn::Convolution3dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:633
armnnTfLiteParser::TfLiteParserImpl::GetOutputTensorIds
static std::vector< int32_t > & GetOutputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
Definition: TfLiteParser.cpp:5492
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape value.
Definition: Descriptors.hpp:1066
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1350
armnn::LstmInputParams::m_CellToOutputWeights
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
armnnTfLiteParser::TfLiteParserImpl::GetSubgraphOutputTensorNames
std::vector< std::string > GetSubgraphOutputTensorNames(size_t subgraphId) const
Return the output tensor names for a given subgraph.
Definition: TfLiteParser.cpp:5974
armnnUtils::ProcessConcatInputTensorInfo
void ProcessConcatInputTensorInfo(armnn::TensorInfo &inputTensorInfo, armnn::OriginsDescriptor &concatDescriptor, const unsigned int &concatAxis, unsigned int inputIndex, unsigned int &mergeDimOrigin)
Definition: ParserHelper.cpp:19
armnn::Convolution3dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:647
armnn::LstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
armnn::Convolution3dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:635
armnn::MaxNumOfTensorDimensions
constexpr unsigned int MaxNumOfTensorDimensions
Definition: Types.hpp:31
armnnTfLiteParser::TfLiteParserImpl::LoadModelFromFile
static ModelPtr LoadModelFromFile(const char *fileName)
Definition: TfLiteParser.cpp:5353
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:103
armnn::DetectionPostProcessDescriptor::m_ScaleW
float m_ScaleW
Center size encoding scale weight.
Definition: Descriptors.hpp:763
armnn::LstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1156
armnn::DataType::QSymmS8
@ QSymmS8
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::IConnectableLayer::InferOutputShapes
virtual std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const =0
Infer the shape of the output(s) based on the provided input shape(s)
armnnUtils::Permute
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:164
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
IgnoreUnused.hpp
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:797
armnn::BroadcastToDescriptor::m_BroadcastToShape
TensorShape m_BroadcastToShape
Target shape value.
Definition: Descriptors.hpp:1675
armnnUtils::Permuted
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:125
armnnTfLiteParser::TfLiteParserImpl::BufferRawPtr
const tflite::BufferT * BufferRawPtr
Definition: TfLiteParser.hpp:40
armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfReshape
static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
Definition: TfLiteParser.cpp:3262
armnn::Pooling2dDescriptor::m_PoolWidth
uint32_t m_PoolWidth
Pooling width value.
Definition: Descriptors.hpp:415
armnn::UnaryOperation::Neg
@ Neg
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:566
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:900
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:702
armnnOnnxParser::ModelPtr
std::unique_ptr< onnx::ModelProto > ModelPtr
Definition: OnnxParser.hpp:23
armnn::CheckLocation::AsString
std::string AsString() const
Definition: Exceptions.hpp:29
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:580
armnn::DetectionPostProcessDescriptor::m_MaxClassesPerDetection
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Definition: Descriptors.hpp:747
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:795
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1475
tflite
Definition: armnn_external_delegate.cpp:12
NumericCast.hpp
armnn::LstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
armnn::NormalizationAlgorithmChannel::Across
@ Across
armnn::ReduceDescriptor::m_ReduceOperation
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
Definition: Descriptors.hpp:1558
TensorUtils.hpp
ARMNN_THROW_PARSE_EXCEPTION
#define ARMNN_THROW_PARSE_EXCEPTION(msg)
Definition: TfLiteParser.cpp:41
armnn::ComparisonOperation::NotEqual
@ NotEqual
CHECK_TENSOR_PTR
#define CHECK_TENSOR_PTR(TENSOR_PTR)
Definition: TfLiteParser.cpp:216
armnn::LstmInputParams::m_CellToInputWeights
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
armnn::ComparisonOperation::GreaterOrEqual
@ GreaterOrEqual
armnn::Exception::what
virtual const char * what() const noexcept override
Definition: Exceptions.cpp:32
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1192
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::DataLayout::NDHWC
@ NDHWC
armnnTfLiteParser::TfLiteParserImpl::LoadModelFromBinary
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
Definition: TfLiteParser.cpp:5383
Assert.hpp
CHECKED_NON_NEGATIVE
#define CHECKED_NON_NEGATIVE(VALUE)
Definition: VerificationHelpers.hpp:35
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:1012
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1069
armnn::LstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1148
armnn::TensorShape
Definition: Tensor.hpp:20
armnnTfLiteParser::TfLiteParserImpl::GetNetworkInputBindingInfo
BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId, const std::string &name) const
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer ...
Definition: TfLiteParser.cpp:5892
armnn::Convolution3dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:631
VerificationHelpers.hpp
armnn::NetworkOptions
std::vector< BackendOptions > NetworkOptions
Definition: BackendOptions.hpp:16
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:793
LstmParams.hpp
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnn::LstmInputParams::m_CellToForgetWeights
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
armnn::StackDescriptor::m_NumInputs
uint32_t m_NumInputs
Number of input tensors.
Definition: Descriptors.hpp:1275
CHECK_TENSOR
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
Definition: TfLiteParser.cpp:203
armnn::DataType::Float16
@ Float16
armnn::CheckLocation
Definition: Exceptions.hpp:14
armnnTfLiteParser::BindingPointInfo
armnn::BindingPointInfo BindingPointInfo
Definition: ITfLiteParser.hpp:20
armnn::LstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1144
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1363
armnn::Pooling2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:427
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::LstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:706
armnn::ComparisonOperation::Less
@ Less
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::Pooling2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:413
armnn::LstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
armnn::Pooling2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:409
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
Logging.hpp
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::UnaryOperation::Exp
@ Exp
armnn::IOutputSlot::SetTensorInfo
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::Convolution3dDescriptor::m_DilationZ
uint32_t m_DilationZ
Dilation along z axis.
Definition: Descriptors.hpp:651
armnn::DetectionPostProcessDescriptor::m_NumClasses
uint32_t m_NumClasses
Number of classes.
Definition: Descriptors.hpp:755
armnn_driver::Model
::android::nn::Model Model
Helper classes.
Definition: ConversionUtils.hpp:45
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::StackDescriptor::m_Axis
uint32_t m_Axis
0-based axis along which to stack the input tensors.
Definition: Descriptors.hpp:1273
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::TensorInfo::IsQuantized
bool IsQuantized() const
Definition: Tensor.cpp:504
armnn::LstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
armnn::DetectionPostProcessDescriptor::m_NmsIouThreshold
float m_NmsIouThreshold
Intersection over union threshold.
Definition: Descriptors.hpp:753
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnn::LstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1166
armnnTfLiteParser::ITfLiteParserPtr
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
Definition: ITfLiteParser.hpp:24
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
CHECK_MODEL
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
Definition: TfLiteParser.cpp:182
armnnTfLiteParser::ITfLiteParser::CreateNetworkFromBinaryFile
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile)
Create the network from a flatbuffers binary file on disk.
Definition: TfLiteParser.cpp:73
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1221
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:309
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:694
armnn::Convolution3dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:629
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnnTfLiteParser::TfLiteParserImpl::TensorIdRawPtrVector
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
Definition: TfLiteParser.hpp:38
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:805
armnn::LstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1164
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::Convolution3dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:643
armnn::ReduceOperation::Sum
@ Sum
armnn::IConnectableLayer::GetType
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
armnnTfLiteParser::TfLiteParserImpl::GetNetworkOutputBindingInfo
BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId, const std::string &name) const
Retrieve binding info (layer id and tensor info) for the network output identified by the given layer...
Definition: TfLiteParser.cpp:5924
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
Filesystem.hpp
armnnTfLiteParser::TfLiteParserImpl::LoadModel
armnn::INetworkPtr LoadModel(std::unique_ptr< tflite::ModelT > model)
Definition: TfLiteParser.cpp:967
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:981
armnn::Convolution3dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:641
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:572
armnn::PermutationVector
Definition: Types.hpp:314
armnnTfLiteParser::TfLiteParserImpl::GetVersion
static const std::string GetVersion()
Retrieve version in X.Y.Z form.
Definition: TfLiteParser.cpp:5987
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1039
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1479
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1353
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:297
ParserHelper.hpp
armnn::Pooling2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:407
Permute.hpp
armnn::ActivationFunction
ActivationFunction
Definition: Types.hpp:86
armnn::BoostLogSeverityMapping::info
@ info
CHECK_SUPPORTED_FUSED_ACTIVATION
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX)
Definition: TfLiteParser.cpp:343
armnn::StackDescriptor::m_InputShape
TensorShape m_InputShape
Required shape of all input tensors.
Definition: Descriptors.hpp:1277
armnn::UnaryOperation
UnaryOperation
Definition: Types.hpp:125
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:574
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:568
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnnTfLiteParser::TfLiteParserImpl
Definition: TfLiteParser.hpp:26
armnn::DetectionPostProcessDescriptor::m_DetectionsPerClass
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
Definition: Descriptors.hpp:749
armnn::DetectionPostProcessDescriptor::m_ScaleH
float m_ScaleH
Center size encoding scale height.
Definition: Descriptors.hpp:765
armnn::DataType::Signed32
@ Signed32
armnn::UnaryOperation::Ceil
@ Ceil
armnn::LstmInputParams::m_InputLayerNormWeights
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
armnn::ReduceDescriptor::m_KeepDims
bool m_KeepDims
if true then output shape has no change.
Definition: Descriptors.hpp:1554
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ReduceOperation::Prod
@ Prod
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:698
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ComparisonDescriptor::m_Operation
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:105
armnn::LstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1162
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::ArgMinMaxFunction::Min
@ Min
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1355
armnn::StandInDescriptor
A StandInDescriptor for the StandIn layer.
Definition: Descriptors.hpp:1281
armnn::Pooling2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:419
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1092
armnn::LstmInputParams::m_ForgetLayerNormWeights
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
armnn::UnaryOperation::Log
@ Log
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:1007
armnn::IOutputSlot::GetOwningIConnectableLayer
virtual const IConnectableLayer & GetOwningIConnectableLayer() const =0
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1346
armnnTfLiteParser::TfLiteParserImpl::GetSubgraphInputs
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
Definition: TfLiteParser.cpp:5449
armnn::BackendOptions
Struct for the users to pass backend specific options.
Definition: BackendOptions.hpp:22
armnnDeserializer::ToTensorInfo
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)
Definition: Deserializer.cpp:654
armnn::PermutationVector::GetSize
SizeType GetSize() const
Definition: Types.hpp:357
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::ComparisonOperation
ComparisonOperation
Definition: Types.hpp:109
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::TransposeConvolution2dDescriptor::m_OutputShape
std::vector< unsigned int > m_OutputShape
Definition: Descriptors.hpp:1486
Tensor.hpp
armnnTfLiteParser::TfLiteParserImpl::ModelPtr
std::unique_ptr< tflite::ModelT > ModelPtr
Definition: TfLiteParser.hpp:30
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:1016
armnn::TileDescriptor::m_Multiples
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
Definition: Descriptors.hpp:1656
armnnOnnxParser::CreateConstTensorImpl
std::pair< armnn::ConstTensor, std::unique_ptr< T[]> > CreateConstTensorImpl(const T *bufferPtr, armnn::TensorInfo &tensorInfo, const armnn::Optional< armnn::PermutationVector & > permutationVector)
Definition: OnnxParser.cpp:602
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1190
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1146
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:799
armnn::GetUnaryOperationAsCString
constexpr char const * GetUnaryOperationAsCString(UnaryOperation operation)
Definition: TypesUtils.hpp:92
armnn::LstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1158
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:578
armnn::Convolution3dDescriptor::m_PadBack
uint32_t m_PadBack
Padding back value in the depth dimension.
Definition: Descriptors.hpp:639
armnn::ReduceDescriptor::m_vAxis
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Definition: Descriptors.hpp:1556
armnn::PadDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
Definition: Descriptors.hpp:1218
std
Definition: BackendId.hpp:149
armnn::ParseException
Definition: Exceptions.hpp:92
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1152
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1473
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1471
armnnTfLiteParser::TfLiteParserImpl::OperatorPtr
std::unique_ptr< tflite::OperatorT > OperatorPtr
Definition: TfLiteParser.hpp:32
TFLITE_PARSER_VERSION
#define TFLITE_PARSER_VERSION
TFLITE_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch ver...
Definition: Version.hpp:25
TfLiteParser.hpp
armnn::LstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
BackendOptions.hpp
armnn::Convolution3dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:649
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::LstmInputParams::m_ProjectionWeights
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
armnn::LstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::TensorInfo::SetShape
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:195
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
CHECK_VALID_SIZE
#define CHECK_VALID_SIZE(ACTUAL,...)
Definition: VerificationHelpers.hpp:32
armnn::Convolution3dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:653
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
armnn::Convolution3dDescriptor::m_StrideZ
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
Definition: Descriptors.hpp:645
armnn::DetectionPostProcessDescriptor::m_UseRegularNms
bool m_UseRegularNms
Use Regular NMS.
Definition: Descriptors.hpp:757
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ReduceOperation
ReduceOperation
Definition: Types.hpp:157
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:803
armnn::UnaryOperation::Abs
@ Abs
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1150
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1344
armnn::ReduceOperation::Min
@ Min
armnnTfLiteParser::TfLiteParserImpl::GetSubgraphCount
size_t GetSubgraphCount() const
Return the number of subgraphs in the parsed model.
Definition: TfLiteParser.cpp:5956
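A sketch of enumerating subgraphs through the public ITfLiteParser API ("model.tflite" is a placeholder path):

    using namespace armnnTfLiteParser;

    ITfLiteParserPtr parser = ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
    for (size_t i = 0; i < parser->GetSubgraphCount(); ++i)
    {
        for (const std::string& name : parser->GetSubgraphInputTensorNames(i))
        {
            BindingPointInfo info = parser->GetNetworkInputBindingInfo(i, name);
        }
    }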
armnn::PadDescriptor::m_PaddingMode
PaddingMode m_PaddingMode
Specifies the padding mode (Constant, Reflect or Symmetric).
Definition: Descriptors.hpp:1224
CHECK_SUBGRAPH
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
Definition: TfLiteParser.cpp:140
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:329
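A minimal creation sketch; recent ArmNN releases require the TensorInfo to be flagged constant first (see SetConstant below):

    std::vector<float> weights = {1.f, 2.f, 3.f, 4.f};
    armnn::TensorInfo weightsInfo(armnn::TensorShape({2, 2}), armnn::DataType::Float32);
    weightsInfo.SetConstant(true);  // required before wrapping the data
    armnn::ConstTensor weightsTensor(weightsInfo, weights.data());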
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::Pooling2dDescriptor::m_OutputShapeRounding
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape (Floor, Ceiling).
Definition: Descriptors.hpp:423
armnn::PoolingAlgorithm
PoolingAlgorithm
Definition: Types.hpp:150
armnn::TransposeConvolution2dDescriptor::m_OutputShapeEnabled
bool m_OutputShapeEnabled
Set to true if an output shape has been specified.
Definition: Descriptors.hpp:1485
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1481
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:713
armnn::Convolution3dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NDHWC, NCDHW).
Definition: Descriptors.hpp:655
armnn::TensorInfo::SetConstant
void SetConstant(const bool IsConstant=true)
Marks the data corresponding to this tensor info as constant.
Definition: Tensor.cpp:514
armnn::DataType::Signed64
@ Signed64
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1483
armnnTfLiteParser
Definition: ITfLiteParser.hpp:17
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1140
armnn::LstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: LstmParams.hpp:90
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:902
armnn::BroadcastToDescriptor
Definition: Descriptors.hpp:1659
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
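The offset participates in the affine dequantization real = scale * (quantized - offset); a sketch with illustrative values:

    armnn::TensorInfo qInfo(armnn::TensorShape({1, 8}), armnn::DataType::QAsymmU8,
                            0.05f /*scale*/, 128 /*offset*/);
    int32_t offset = qInfo.GetQuantizationOffset();  // 128
    float scale    = qInfo.GetQuantizationScale();   // 0.05f
    float real     = scale * (200 - offset);         // dequantizes 200 to 3.6f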
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::ComparisonOperation::Equal
@ Equal
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:704
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1142
armnnTfLiteParser::TfLiteParserImpl::GetBuffer
static BufferRawPtr GetBuffer(const ModelPtr &model, size_t bufferIndex)
Definition: TfLiteParser.cpp:5714
armnn::NullPointerException
Definition: Exceptions.hpp:146
armnnUtils::ToFloatArray
std::unique_ptr< float[]> ToFloatArray(const std::vector< PrimitiveType > &data, const armnn::TensorInfo &tensorInfo)
Definition: TensorUtils.cpp:307
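A sketch of the overload that takes raw uint8_t bytes, assuming it dispatches on the TensorInfo's data type (values are illustrative):

    // Four QAsymmS8 values stored as raw bytes; scale 0.5, offset 0
    std::vector<uint8_t> raw = {0, 2, 4, 252};  // 252 reinterprets as int8_t(-4)
    armnn::TensorInfo info(armnn::TensorShape({4}), armnn::DataType::QAsymmS8, 0.5f, 0);
    std::unique_ptr<float[]> floats = armnnUtils::ToFloatArray(raw, info);
    // expected: {0.0f, 1.0f, 2.0f, -2.0f}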
armnn::LstmInputParams
Definition: LstmParams.hpp:13
armnn::LstmInputParams::m_CellLayerNormWeights
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
armnn::StridedSliceDescriptor::m_NewAxisMask
int32_t m_NewAxisMask
New axis mask value.
Definition: Descriptors.hpp:1360
CHECK_BUFFER
#define CHECK_BUFFER(MODEL, BUFFER_INDEX)
Definition: TfLiteParser.cpp:251
armnn::GetComparisonOperationAsCString
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:62
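Handy for logging; for example:

    const char* opName =
        armnn::GetComparisonOperationAsCString(armnn::ComparisonOperation::Equal);  // "Equal"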
armnn::TensorShape::GetNumElements
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all dimension sizes which are specified.
Definition: Tensor.cpp:181
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::CheckLocation::FileLine
std::string FileLine() const
Definition: Exceptions.hpp:37
armnnTfLiteParser::TfLiteParserImpl::GetInputTensorIds
static std::vector< int32_t > & GetInputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
Definition: TfLiteParser.cpp:5482
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::Pooling2dDescriptor::m_PoolType
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
Definition: Descriptors.hpp:405
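A sketch of configuring the descriptor using the two fields listed on this page; the remaining pad/stride/pool-size fields keep their defaults here:

    armnn::Pooling2dDescriptor pool;
    pool.m_PoolType            = armnn::PoolingAlgorithm::Max;
    pool.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;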
armnnTfLiteParser::ITfLiteParser
Definition: ITfLiteParser.hpp:26
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
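Used together with value() above; a minimal sketch:

    armnn::Optional<int> axis(3);                        // holds a value
    armnn::Optional<int> none = armnn::EmptyOptional();  // empty
    if (axis.has_value())
    {
        int value = axis.value();  // 3
    }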
armnn::FileNotFoundException
Definition: Exceptions.hpp:86
armnn::ReduceOperation::Max
@ Max
armnn::LstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1160
armnnTfLiteParser::ComputeWrappedIndex
unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
Definition: TfLiteParser.cpp:4477
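This helper normalizes TensorFlow Lite's negative axis indices into [0, numDimsIn); a sketch of the expected behaviour (the wrap rule idx < 0 ? numDimsIn + idx : idx is inferred from its use on axis operands):

    // For a 4-D tensor, axis -1 is expected to wrap to the last dimension, 3
    unsigned int axis = armnnTfLiteParser::ComputeWrappedIndex(-1, 4);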
armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfSqueeze
static armnn::TensorInfo OutputShapeOfSqueeze(std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
Definition: TfLiteParser.cpp:2381
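A sketch of what this squeeze-shape helper computes, assuming the listed size-1 dimensions are removed:

    armnn::TensorInfo input(armnn::TensorShape({1, 3, 1, 5}), armnn::DataType::Float32);
    armnn::TensorInfo output =
        armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfSqueeze({0u, 2u}, input);
    // output shape expected to be {3, 5}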
armnn::ComparisonOperation::Greater
@ Greater
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:700
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:696