ArmNN
 20.02
armnnTfParser Namespace Reference

Classes

class  ITfParser
 Parses a directed acyclic graph from a TensorFlow protobuf file. More...
 
class  TfParser
 
struct  WithOutputTensorIndex
 WithOutputTensorIndex wraps a value and an index. More...
 

Typedefs

using BindingPointInfo = armnn::BindingPointInfo
 
using ITfParserPtr = std::unique_ptr< ITfParser, void(*)(ITfParser *parser)>
 
using ParsedTfOperationPtr = std::unique_ptr< ParsedTfOperation >
 
using OutputOfParsedTfOperation = WithOutputTensorIndex< ParsedTfOperation * >
 
using OutputOfConstNodeDef = WithOutputTensorIndex< const tensorflow::NodeDef * >
 
using OutputId = WithOutputTensorIndex< std::string >
 

Functions

void CalculateSamePadding (uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
 
void CalcPadding (uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
 
DataType ConvertTfTensorDataType (const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
 
TensorInfo OutputShapeOfExpandDims (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 
unsigned int CheckPaddingTensor (const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
 
TensorInfo CalculatePaddedOutputTensorInfo (const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
 
TensorInfo OutputShapeOfSqueeze (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 

Typedef Documentation

◆ BindingPointInfo

Definition at line 19 of file ITfParser.hpp.

◆ ITfParserPtr

using ITfParserPtr = std::unique_ptr<ITfParser, void(*)(ITfParser* parser)>

Definition at line 22 of file ITfParser.hpp.

◆ OutputId

using OutputId = WithOutputTensorIndex<std::string>

Definition at line 62 of file TfParser.hpp.

◆ OutputOfConstNodeDef

using OutputOfConstNodeDef = WithOutputTensorIndex<const tensorflow::NodeDef*>

Definition at line 61 of file TfParser.hpp.

◆ OutputOfParsedTfOperation

using OutputOfParsedTfOperation = WithOutputTensorIndex<ParsedTfOperation *>

Definition at line 60 of file TfParser.hpp.

◆ ParsedTfOperationPtr

using ParsedTfOperationPtr = std::unique_ptr<ParsedTfOperation>

Definition at line 35 of file TfParser.hpp.

Function Documentation

◆ CalcPadding()

void armnnTfParser::CalcPadding ( uint32_t  input,
uint32_t  kernel,
uint32_t  stride,
uint32_t &  outPadHead,
uint32_t &  outPadTail,
bool  samePadding 
)

Definition at line 421 of file TfParser.cpp.

References CalculateSamePadding(), CHECK_LOCATION, Layer::GetName(), Layer::GetNumOutputSlots(), Layer::GetOutputSlot(), and m_Layer.

Referenced by ConvertTfTensorDataType(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), and OutputShapeOfSqueeze().

423 {
424  CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
425 }
void CalculateSamePadding(uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
Definition: TfParser.cpp:405

◆ CalculatePaddedOutputTensorInfo()

TensorInfo armnnTfParser::CalculatePaddedOutputTensorInfo ( const TensorInfo &  inputTensorInfo,
const std::vector< std::pair< unsigned int, unsigned int >> &  padList 
)

Definition at line 2138 of file TfParser.cpp.

References INetwork::AddConcatLayer(), INetwork::AddPadLayer(), INetwork::AddReshapeLayer(), INetwork::AddResizeLayer(), armnn::Bilinear, CHECK_LOCATION, CheckPaddingTensor(), IOutputSlot::Connect(), armnn::Float32, IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), OriginsDescriptor::GetViewOrigin(), armnn::IgnoreUnused(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ReshapeDescriptor::m_TargetShape, ResizeDescriptor::m_TargetWidth, armnn::NHWC, OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and OriginsDescriptor::SetViewOriginCoord().

2140 {
2141  unsigned int numDims = inputTensorInfo.GetNumDimensions();
2142  std::vector<unsigned int> outDims;
2143  for (unsigned int i = 0; i < numDims; ++i)
2144  {
2145  unsigned int dimSize = inputTensorInfo.GetShape()[i];
2146  const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2147  dimSize += dimPadding.first;
2148  dimSize += dimPadding.second;
2149  outDims.push_back(dimSize);
2150  }
2151  TensorInfo paddedTensorInfo = inputTensorInfo;
2152  unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2153  paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2154  return paddedTensorInfo;
2155 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

◆ CalculateSamePadding()

void armnnTfParser::CalculateSamePadding ( uint32_t  inputSize,
uint32_t  stride,
uint32_t  filterSize,
bool  samePadding,
uint32_t *  paddingFront,
uint32_t *  paddingBack 
)
inline

Definition at line 405 of file TfParser.cpp.

Referenced by CalcPadding().

407  {
408  *paddingFront = 0;
409  *paddingBack = 0;
410 
411  if (samePadding) {
412  uint32_t outputSize = (inputSize + stride - 1) / stride;
413  uint32_t temp = (outputSize - 1) * stride + filterSize;
414  if (temp > inputSize) {
415  *paddingFront = (temp - inputSize) / 2;
416  *paddingBack = (temp - inputSize) - *paddingFront;
417  }
418  }
419 }

◆ CheckPaddingTensor()

unsigned int armnnTfParser::CheckPaddingTensor ( const ConstTensor &  paddingTensor,
const TensorInfo &  inputTensorInfo,
const std::string &  nodeName 
)

Definition at line 2106 of file TfParser.cpp.

References CHECK_LOCATION, TensorInfo::GetNumDimensions(), and BaseTensor< MemoryType >::GetShape().

Referenced by CalculatePaddedOutputTensorInfo().

2109 {
2110  unsigned int rank = paddingTensor.GetShape()[0];
2111  unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2112  if (rank != expectedRank)
2113  {
2114  throw ParseException(
2115  boost::str(
2116  boost::format(
2117  "Expected the padding tensor to be of rank %1% not %2% on Node %3% %4%.")
2118  % expectedRank
2119  % rank
2120  % nodeName
2121  % CHECK_LOCATION().AsString()));
2122  }
2123  unsigned int second = paddingTensor.GetShape()[1];
2124  if (second != 2)
2125  {
2126  throw ParseException(
2127  boost::str(
2128  boost::format(
2129  "Expected the padding tensor to be of dimensions [%1%, 2] not [%1%, %2%] on Node %3% %4%.")
2130  % rank
2131  % second
2132  % nodeName
2133  % CHECK_LOCATION().AsString()));
2134  }
2135  return rank;
2136 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:169
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

◆ ConvertTfTensorDataType()

DataType armnnTfParser::ConvertTfTensorDataType ( const tensorflow::DataType  tfDataType,
const tensorflow::NodeDef &  nodeDef 
)

Definition at line 933 of file TfParser.cpp.

References INetwork::AddConvolution2dLayer(), INetwork::AddDepthwiseConvolution2dLayer(), CalcPadding(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), armnn::GetDataTypeSize(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, armnnUtils::Permute(), armnnUtils::Permuted(), and IOutputSlot::SetTensorInfo().

935 {
936  switch (tfDataType)
937  {
938  case tensorflow::DT_FLOAT:
939  return DataType::Float32;
940  break;
941  case tensorflow::DT_INT32:
942  return DataType::Signed32;
943  break;
944  default:
945  throw ParseException(
946  boost::str(
947  boost::format(
948  "Unknown DataType %1% for node %2% %3%")
949  % tensorflow::DataType_Name(tfDataType)
950  % nodeDef.name()
951  % CHECK_LOCATION().AsString()));
952  }
953 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ OutputShapeOfExpandDims()

TensorInfo armnnTfParser::OutputShapeOfExpandDims ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 1468 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddBatchNormalizationLayer(), INetwork::AddComparisonLayer(), INetwork::AddGatherLayer(), INetwork::AddMinimumLayer(), INetwork::AddReshapeLayer(), INetwork::AddStackLayer(), INetwork::AddSubtractionLayer(), INetwork::AddTransposeLayer(), CHECK_DATA_FORMAT, CHECK_LOCATION, IOutputSlot::Connect(), TensorInfo::GetDataType(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, StackDescriptor::m_Axis, BatchNormalizationDescriptor::m_DataLayout, BatchNormalizationDescriptor::m_Eps, ActivationDescriptor::m_Function, WithOutputTensorIndex< T >::m_Index, WithOutputTensorIndex< T >::m_IndexedValue, StackDescriptor::m_InputShape, StackDescriptor::m_NumInputs, ReshapeDescriptor::m_TargetShape, armnn::numeric_cast(), TensorInfo::SetDataType(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnUtils::TransposeTensorShape().

1469 {
1470  BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1471 
1472  if (inputTensorInfo.GetNumDimensions() > 4) {
1473  throw ParseException(
1474  boost::str(
1475  boost::format(
1476  "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1477  % inputTensorInfo.GetNumDimensions()
1478  % nodeDef.name()
1479  % CHECK_LOCATION().AsString()));
1480  }
1481 
1482  std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1483 
1484  std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1485  std::vector<uint32_t> outputDims;
1486 
1487  // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1488  if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1489  {
1490  // add current input shape to outputDims
1491  for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1492  auto currentDimension = inputTensorInfo.GetShape()[i];
1493  outputDims.push_back(currentDimension);
1494  }
1495 
1496  // insert a dimension of 1 at index 'expandDim' of inputs shape
1497  if (expandDim >= 0)
1498  {
1499  auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1500  outputDims.insert(getPosition, 1);
1501  }
1502 
1503  // if negative number for 'expandDim' then count backwards from the last element
1504  // and insert 1 dimension at index 'expandDim'
1505  if (expandDim < 0)
1506  {
1507  int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
1508  auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1509  outputDims.insert(getPosition, 1);
1510  }
1511  }
1512  else
1513  {
1514  throw ParseException(
1515  boost::str(
1516  boost::format(
1517  "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1518  % expandDim
1519  % inputDimSize
1520  % CHECK_LOCATION().AsString()));
1521  }
1522 
1523  if (outputDims.size() > 4)
1524  {
1525  throw ParseException(
1526  boost::str(
1527  boost::format(
1528  "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1529  % outputDims.size()
1530  % nodeDef.name()
1531  % CHECK_LOCATION().AsString()));
1532  }
1533 
1534  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1535  outputDims.data());
1536 
1537  TensorInfo outTensorInfo = inputTensorInfo;
1538  outTensorInfo.SetShape(outShape);
1539 
1540  return outTensorInfo;
1541 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

◆ OutputShapeOfSqueeze()

TensorInfo armnnTfParser::OutputShapeOfSqueeze ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 2463 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddAdditionLayer(), INetwork::AddDivisionLayer(), INetwork::AddElementwiseUnaryLayer(), INetwork::AddFullyConnectedLayer(), INetwork::AddInputLayer(), INetwork::AddMaximumLayer(), INetwork::AddMeanLayer(), INetwork::AddMultiplicationLayer(), INetwork::AddNormalizationLayer(), INetwork::AddOutputLayer(), INetwork::AddPooling2dLayer(), INetwork::AddReshapeLayer(), INetwork::AddSoftmaxLayer(), INetwork::AddSplitterLayer(), INetwork::AddStridedSliceLayer(), CalcPadding(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), TensorInfo::GetDataType(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, NormalizationDescriptor::m_Alpha, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, NormalizationDescriptor::m_Beta, FullyConnectedDescriptor::m_BiasEnabled, Pooling2dDescriptor::m_DataLayout, NormalizationDescriptor::m_DataLayout, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, NormalizationDescriptor::m_K, MeanDescriptor::m_KeepDims, m_Layer, StridedSliceDescriptor::m_NewAxisMask, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, 
Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, ReshapeDescriptor::m_TargetShape, armnn::NHWC, armnn::numeric_cast(), TfParser::ParsedMatMulTfOperation, TfParser::ParsedMulTfOperation, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), and ViewsDescriptor::SetViewSize().

2464 {
2465  BOOST_ASSERT(nodeDef.op() == "Squeeze");
2466  tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2467 
2468  DataType type;
2469  if (tfDataType == tensorflow::DT_FLOAT)
2470  {
2471  type = DataType::Float32;
2472  }
2473  else if (tfDataType == tensorflow::DT_INT32)
2474  {
2475  type = DataType::Signed32;
2476  }
2477  else
2478  {
2479  throw ParseException(
2480  boost::str(
2481  boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2482  % tensorflow::DataType_Name(tfDataType)
2483  % nodeDef.name()
2484  % CHECK_LOCATION().AsString()));
2485  }
2486 
2487 
2488  if (inputTensorInfo.GetNumDimensions() > 4)
2489  {
2490  throw ParseException(
2491  boost::str(
2492  boost::format(
2493  "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2494  % inputTensorInfo.GetNumDimensions()
2495  % nodeDef.name()
2496  % CHECK_LOCATION().AsString()));
2497  }
2498 
2499  std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
2500  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2501 
2502  if (squeezeDims.empty())
2503  {
2504  squeezeDims.assign(dimensionSequence,
2505  dimensionSequence+inputTensorInfo.GetNumDimensions());
2506  }
2507 
2508  std::vector<uint32_t> outputDims;
2509  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2510  {
2511  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2512  auto currentDimension = inputTensorInfo.GetShape()[i];
2513  if (skipSqueeze || currentDimension != 1)
2514  {
2515  outputDims.push_back(currentDimension);
2516  }
2517  }
2518 
2519  if (outputDims.size() > 4)
2520  {
2521  throw ParseException(
2522  boost::str(
2523  boost::format(
2524  "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2525  % outputDims.size()
2526  % nodeDef.name()
2527  % CHECK_LOCATION().AsString()));
2528  }
2529 
2530  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2531  outputDims.data());
2532 
2533  TensorInfo outTensorInfo = inputTensorInfo;
2534  outTensorInfo.SetShape(outShape);
2535  outTensorInfo.SetDataType(type);
2536 
2537  return outTensorInfo;
2538 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
DataType
Definition: Types.hpp:32
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92