ArmNN 21.02
armnnTfParser Namespace Reference

Classes

class  ITfParser
 Parses a directed acyclic graph from a TensorFlow protobuf file. More...
 
struct  WithOutputTensorIndex
 WithOutputTensorIndex wraps a value and an index. More...
 

Typedefs

using BindingPointInfo = armnn::BindingPointInfo
 
using ITfParserPtr = std::unique_ptr< ITfParser, void(*)(ITfParser *parser)>
 
using ParsedTfOperationPtr = std::unique_ptr< ParsedTfOperation >
 
using OutputOfParsedTfOperation = WithOutputTensorIndex< ParsedTfOperation * >
 
using OutputOfConstNodeDef = WithOutputTensorIndex< const tensorflow::NodeDef * >
 
using OutputId = WithOutputTensorIndex< std::string >
 

Functions

void CalcPadding (uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t dilation, uint32_t &paddingFront, uint32_t &paddingBack, bool samePadding)
 
DataType ConvertTfTensorDataType (const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
 
TensorInfo OutputShapeOfExpandDims (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo, std::int32_t expandDim)
 
unsigned int CheckPaddingTensor (const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
 
TensorInfo CalculatePaddedOutputTensorInfo (const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
 
TensorInfo OutputShapeOfSqueeze (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 

Typedef Documentation

◆ BindingPointInfo

using BindingPointInfo = armnn::BindingPointInfo

Definition at line 19 of file ITfParser.hpp.

◆ ITfParserPtr

using ITfParserPtr = std::unique_ptr<ITfParser, void(*)(ITfParser* parser)>

Definition at line 22 of file ITfParser.hpp.
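
The pointer type carries a custom deleter so the parser is destroyed by the library that created it. A minimal usage sketch, assuming the ITfParser::Create() factory and the CreateNetworkFromBinaryFile() member declared in ITfParser.hpp for this release; the file name "model.pb" and the node names "input" and "output" are placeholders.

    // Minimal sketch; "model.pb", "input" and "output" are placeholder names.
    #include "armnnTfParser/ITfParser.hpp"

    #include <armnn/INetwork.hpp>
    #include <armnn/Tensor.hpp>

    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
        // Create() returns an ITfParserPtr; its custom deleter hands the object back to the library.
        armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();

        // Shapes for the graph input placeholders and the names of the requested outputs.
        std::map<std::string, armnn::TensorShape> inputShapes = {
            { "input", armnn::TensorShape({ 1, 224, 224, 3 }) }
        };
        std::vector<std::string> requestedOutputs = { "output" };

        armnn::INetworkPtr network =
            parser->CreateNetworkFromBinaryFile("model.pb", inputShapes, requestedOutputs);

        return network ? 0 : 1;
    }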

◆ OutputId

using OutputId = WithOutputTensorIndex<std::string>

Definition at line 62 of file TfParser.hpp.

◆ OutputOfConstNodeDef

using OutputOfConstNodeDef = WithOutputTensorIndex<const tensorflow::NodeDef*>

Definition at line 61 of file TfParser.hpp.

◆ OutputOfParsedTfOperation

using OutputOfParsedTfOperation = WithOutputTensorIndex<ParsedTfOperation *>

Definition at line 60 of file TfParser.hpp.

◆ ParsedTfOperationPtr

using ParsedTfOperationPtr = std::unique_ptr<ParsedTfOperation>

Definition at line 35 of file TfParser.hpp.

Function Documentation

◆ CalcPadding()

void armnnTfParser::CalcPadding(uint32_t inputSize,
                                uint32_t filterSize,
                                uint32_t stride,
                                uint32_t dilation,
                                uint32_t& paddingFront,
                                uint32_t& paddingBack,
                                bool samePadding)

Definition at line 429 of file TfParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, Layer::GetName(), Layer::GetNumOutputSlots(), Layer::GetOutputSlot(), and m_Layer.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), OnnxParserImpl::CreateNetworkFromString(), ITfParser::TfParserImpl::ParseConv2D(), ITfParser::TfParserImpl::ParseDepthwiseConv2D(), and ITfParser::TfParserImpl::ParsePooling2d().

{
    paddingFront = 0;
    paddingBack = 0;
    if (samePadding)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
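
For samePadding == true the helper reproduces TensorFlow's SAME rule: the output size is ceil(inputSize / stride), the filter is dilated, and any shortfall is split between front and back padding, with the odd element going to the back. A standalone sketch of the same arithmetic (the function itself is internal to TfParser.cpp and not exported), useful for checking expected values:

    #include <cstdint>
    #include <cstdio>

    // Standalone re-derivation of the SAME-padding arithmetic shown above, for checking
    // values by hand; it mirrors CalcPadding but is not the library symbol itself.
    static void SamePadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride,
                            uint32_t dilation, uint32_t& front, uint32_t& back)
    {
        front = 0;
        back = 0;
        uint32_t outputSize  = (inputSize + stride - 1) / stride;              // ceil(inputSize / stride)
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t needed      = (outputSize - 1) * stride + dilatedSize;
        if (needed > inputSize)
        {
            front = (needed - inputSize) / 2;      // smaller half at the front
            back  = (needed - inputSize) - front;  // remainder (the odd element) at the back
        }
    }

    int main()
    {
        uint32_t front = 0;
        uint32_t back = 0;
        // 224-wide input, 3-wide filter, stride 2, no dilation: output width 112, padding 0 front / 1 back.
        SamePadding(224, 3, 2, 1, front, back);
        std::printf("front=%u back=%u\n", front, back);
        return 0;
    }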

◆ CalculatePaddedOutputTensorInfo()

TensorInfo armnnTfParser::CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
                                                          const std::vector<std::pair<unsigned int, unsigned int>>& padList)

Definition at line 2176 of file TfParser.cpp.

References TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and TensorInfo::SetShape().

Referenced by ITfParser::TfParserImpl::ParsePad().

{
    unsigned int numDims = inputTensorInfo.GetNumDimensions();
    std::vector<unsigned int> outDims;
    for (unsigned int i = 0; i < numDims; ++i)
    {
        unsigned int dimSize = inputTensorInfo.GetShape()[i];
        const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
        dimSize += dimPadding.first;
        dimSize += dimPadding.second;
        outDims.push_back(dimSize);
    }
    TensorInfo paddedTensorInfo = inputTensorInfo;
    unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
    paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
    return paddedTensorInfo;
}

◆ CheckPaddingTensor()

unsigned int armnnTfParser::CheckPaddingTensor(const ConstTensor& paddingTensor,
                                               const TensorInfo& inputTensorInfo,
                                               const std::string& nodeName)

Definition at line 2147 of file TfParser.cpp.

References CHECK_LOCATION, TensorInfo::GetNumDimensions(), and BaseTensor< MemoryType >::GetShape().

Referenced by ITfParser::TfParserImpl::ParsePad().

{
    unsigned int rank = paddingTensor.GetShape()[0];
    unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
    if (rank != expectedRank)
    {
        throw ParseException(
            fmt::format("Expected the padding tensor to be of rank {} not {} on Node {} {}.",
                        expectedRank,
                        rank,
                        nodeName,
                        CHECK_LOCATION().AsString()));
    }
    unsigned int second = paddingTensor.GetShape()[1];
    if (second != 2)
    {
        throw ParseException(
            fmt::format("Expected the padding tensor to be of dimensions "
                        "[{0}, 2] not [{0}, {1}] on Node {2} {3}.",
                        rank,
                        second,
                        nodeName,
                        CHECK_LOCATION().AsString()));
    }
    return rank;
}

◆ ConvertTfTensorDataType()

DataType armnnTfParser::ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
                                                const tensorflow::NodeDef& nodeDef)

Definition at line 952 of file TfParser.cpp.

References CHECK_LOCATION, and ITfParser::MakeTfOperation.

Referenced by ITfParser::TfParserImpl::ParseConst().

{
    switch (tfDataType)
    {
    case tensorflow::DT_FLOAT:
        return DataType::Float32;
        break;
    case tensorflow::DT_INT32:
        return DataType::Signed32;
        break;
    default:
        throw ParseException(
            fmt::format("Unknown DataType {} for node {} {}",
                        tensorflow::DataType_Name(tfDataType),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }
}
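
Only DT_FLOAT and DT_INT32 are mapped (to DataType::Float32 and DataType::Signed32); every other TensorFlow type throws ParseException. A sketch assuming a hypothetical local re-declaration of the internal helper and the TensorFlow protobuf headers; the node name is a placeholder:

    #include <armnn/Types.hpp>

    #include "tensorflow/core/framework/node_def.pb.h"
    #include "tensorflow/core/framework/types.pb.h"

    // Hypothetical local declaration of the internal helper (defined in TfParser.cpp).
    namespace armnnTfParser
    {
    armnn::DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
                                            const tensorflow::NodeDef& nodeDef);
    }

    int main()
    {
        tensorflow::NodeDef node;
        node.set_name("example_const");  // only used when building the error message

        // DT_FLOAT maps to Float32 and DT_INT32 to Signed32; anything else throws ParseException.
        armnn::DataType type = armnnTfParser::ConvertTfTensorDataType(tensorflow::DT_FLOAT, node);
        return type == armnn::DataType::Float32 ? 0 : 1;
    }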

◆ OutputShapeOfExpandDims()

TensorInfo armnnTfParser::OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
                                                  TensorInfo inputTensorInfo,
                                                  std::int32_t expandDim)

Definition at line 1466 of file TfParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), armnn::numeric_cast(), and TensorInfo::SetShape().

Referenced by ITfParser::TfParserImpl::ParseExpandDims().

{
    ARMNN_ASSERT(nodeDef.op() == "ExpandDims");

    if (inputTensorInfo.GetNumDimensions() > 4) {
        throw ParseException(
            fmt::format("Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    std::vector<uint32_t> outputDims;

    // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
    if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
    {
        // add current input shape to outputDims
        for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
            auto currentDimension = inputTensorInfo.GetShape()[i];
            outputDims.push_back(currentDimension);
        }

        // insert a dimension of 1 at index 'expandDim' of inputs shape
        if (expandDim >= 0)
        {
            auto getPosition = std::next(outputDims.begin() + 0, expandDim);
            outputDims.insert(getPosition, 1);
        }

        // if negative number for 'expandDim' then count backwards from the last element
        // and insert 1 dimension at index 'expandDim'
        if (expandDim < 0)
        {
            int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
            auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
            outputDims.insert(getPosition, 1);
        }
    }
    else
    {
        throw ParseException(
            fmt::format("Cannot expand dimension {} in input tensor with {} dimension {}",
                        expandDim,
                        inputDimSize,
                        CHECK_LOCATION().AsString()));
    }

    if (outputDims.size() > 4)
    {
        throw ParseException(
            fmt::format("Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
                        outputDims.size(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}

◆ OutputShapeOfSqueeze()

TensorInfo armnnTfParser::OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef,
                                               TensorInfo inputTensorInfo)

Definition at line 2485 of file TfParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and TensorInfo::SetShape().

Referenced by ITfParser::TfParserImpl::ParseSqueeze().

{
    ARMNN_ASSERT(nodeDef.op() == "Squeeze");
    tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");

    DataType type;
    if (tfDataType == tensorflow::DT_FLOAT)
    {
        type = DataType::Float32;
    }
    else if (tfDataType == tensorflow::DT_INT32)
    {
        type = DataType::Signed32;
    }
    else
    {
        throw ParseException(
            fmt::format("Unsupported DataType {} for Squeeze operation {} {}",
                        tensorflow::DataType_Name(tfDataType),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw ParseException(
            fmt::format("Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence + inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        throw ParseException(
            fmt::format("Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
                        outputDims.size(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);
    outTensorInfo.SetDataType(type);

    return outTensorInfo;
}