ArmNN
 20.05
armnnTfParser Namespace Reference

Classes

class  ITfParser
 Parses a directed acyclic graph from a TensorFlow protobuf file. More...
 
class  TfParser
 
struct  WithOutputTensorIndex
 WithOutputTensorIndex wraps a value and an index. More...
 

Typedefs

using BindingPointInfo = armnn::BindingPointInfo
 
using ITfParserPtr = std::unique_ptr< ITfParser, void(*)(ITfParser *parser)>
 
using ParsedTfOperationPtr = std::unique_ptr< ParsedTfOperation >
 
using OutputOfParsedTfOperation = WithOutputTensorIndex< ParsedTfOperation * >
 
using OutputOfConstNodeDef = WithOutputTensorIndex< const tensorflow::NodeDef * >
 
using OutputId = WithOutputTensorIndex< std::string >
 

Functions

void CalculateSamePadding (uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
 
void CalcPadding (uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
 
DataType ConvertTfTensorDataType (const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
 
TensorInfo OutputShapeOfExpandDims (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 
unsigned int CheckPaddingTensor (const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
 
TensorInfo CalculatePaddedOutputTensorInfo (const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
 
TensorInfo OutputShapeOfSqueeze (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 

Typedef Documentation

◆ BindingPointInfo

Definition at line 19 of file ITfParser.hpp.

◆ ITfParserPtr

using ITfParserPtr = std::unique_ptr<ITfParser, void(*)(ITfParser* parser)>

Definition at line 22 of file ITfParser.hpp.

◆ OutputId

using OutputId = WithOutputTensorIndex<std::string>

Definition at line 62 of file TfParser.hpp.

◆ OutputOfConstNodeDef

using OutputOfConstNodeDef = WithOutputTensorIndex<const tensorflow::NodeDef*>

Definition at line 61 of file TfParser.hpp.

◆ OutputOfParsedTfOperation

using OutputOfParsedTfOperation = WithOutputTensorIndex<ParsedTfOperation *>

Definition at line 60 of file TfParser.hpp.

◆ ParsedTfOperationPtr

using ParsedTfOperationPtr = std::unique_ptr<ParsedTfOperation>

Definition at line 35 of file TfParser.hpp.

Function Documentation

◆ CalcPadding()

void armnnTfParser::CalcPadding ( uint32_t  input,
uint32_t  kernel,
uint32_t  stride,
uint32_t &  outPadHead,
uint32_t &  outPadTail,
bool  samePadding 
)

Definition at line 420 of file TfParser.cpp.

References ARMNN_ASSERT, CalculateSamePadding(), CHECK_LOCATION, Layer::GetName(), Layer::GetNumOutputSlots(), Layer::GetOutputSlot(), and m_Layer.

Referenced by ConvertTfTensorDataType(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), and OutputShapeOfSqueeze().

422 {
423  CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
424 }
void CalculateSamePadding(uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
Definition: TfParser.cpp:404

◆ CalculatePaddedOutputTensorInfo()

/// Builds the TensorInfo that results from applying per-dimension padding to
/// an input tensor.
///
/// @param inputTensorInfo Info for the tensor being padded.
/// @param padList         One (front, back) padding pair per input dimension;
///                        assumes padList has at least as many entries as the
///                        input has dimensions — callers validate via
///                        CheckPaddingTensor.
/// @return A copy of inputTensorInfo whose shape is grown by the padding.
TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
    const std::vector<std::pair<unsigned int, unsigned int>>& padList)
{
    const unsigned int numDims = inputTensorInfo.GetNumDimensions();

    // Each output dimension is the input dimension plus its front and back padding.
    std::vector<unsigned int> paddedDims;
    paddedDims.reserve(numDims);
    for (unsigned int dim = 0; dim < numDims; ++dim)
    {
        const std::pair<unsigned int, unsigned int>& padding = padList[dim];
        paddedDims.push_back(inputTensorInfo.GetShape()[dim] + padding.first + padding.second);
    }

    TensorInfo paddedTensorInfo = inputTensorInfo;
    const unsigned int paddedRank = static_cast<unsigned int>(paddedDims.size());
    paddedTensorInfo.SetShape(TensorShape{ paddedRank, paddedDims.data() });
    return paddedTensorInfo;
}

◆ CalculateSamePadding()

/// Computes TensorFlow SAME padding for a single dimension.
///
/// With SAME padding the output size is ceil(inputSize / stride); the total
/// padding needed to achieve that is split between front and back, with the
/// back receiving the extra element when the total is odd.
///
/// @param inputSize    Size of the input in this dimension.
/// @param stride       Stride in this dimension.
/// @param filterSize   Size of the filter in this dimension.
/// @param samePadding  False (VALID padding) yields zero front/back padding.
/// @param paddingFront Receives the padding before the data.
/// @param paddingBack  Receives the padding after the data.
void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                          uint32_t filterSize, bool samePadding,
                          uint32_t* paddingFront, uint32_t* paddingBack)
{
    *paddingFront = 0;
    *paddingBack  = 0;

    if (!samePadding)
    {
        return; // VALID padding: nothing to add.
    }

    // ceil(inputSize / stride) without floating point.
    const uint32_t outputSize = (inputSize + stride - 1) / stride;
    // Total extent the filter sweeps to produce that output size.
    const uint32_t neededInput = (outputSize - 1) * stride + filterSize;
    if (neededInput > inputSize)
    {
        const uint32_t totalPadding = neededInput - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack  = totalPadding - (totalPadding / 2);
    }
}

◆ CheckPaddingTensor()

unsigned int armnnTfParser::CheckPaddingTensor ( const ConstTensor paddingTensor,
const TensorInfo inputTensorInfo,
const std::string &  nodeName 
)

Definition at line 2105 of file TfParser.cpp.

References CHECK_LOCATION, TensorInfo::GetNumDimensions(), and BaseTensor< MemoryType >::GetShape().

Referenced by CalculatePaddedOutputTensorInfo().

2108 {
2109  unsigned int rank = paddingTensor.GetShape()[0];
2110  unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2111  if (rank != expectedRank)
2112  {
2113  throw ParseException(
2114  boost::str(
2115  boost::format(
2116  "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
2117  % expectedRank
2118  % rank
2119  % nodeName
2120  % CHECK_LOCATION().AsString()));
2121  }
2122  unsigned int second = paddingTensor.GetShape()[1];
2123  if (second != 2)
2124  {
2125  throw ParseException(
2126  boost::str(
2127  boost::format(
2128  "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
2129  % rank
2130  % second
2131  % nodeName
2132  % CHECK_LOCATION().AsString()));
2133  }
2134  return rank;
2135 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:169
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

◆ ConvertTfTensorDataType()

/// Maps a TensorFlow data type onto the equivalent armnn::DataType.
///
/// Only DT_FLOAT and DT_INT32 are supported by this parser.
///
/// @param tfDataType TensorFlow data type to convert.
/// @param nodeDef    Node the type came from, used in error messages.
/// @return DataType::Float32 for DT_FLOAT, DataType::Signed32 for DT_INT32.
/// @throws ParseException for any other TensorFlow data type.
DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
                                 const tensorflow::NodeDef& nodeDef)
{
    switch (tfDataType)
    {
        // Removed unreachable `break` statements that followed the returns.
        case tensorflow::DT_FLOAT:
            return DataType::Float32;
        case tensorflow::DT_INT32:
            return DataType::Signed32;
        default:
            throw ParseException(
                boost::str(
                    boost::format(
                        "Unknown DataType %1% for node %2% %3%")
                    % tensorflow::DataType_Name(tfDataType)
                    % nodeDef.name()
                    % CHECK_LOCATION().AsString()));
    }
}

◆ OutputShapeOfExpandDims()

TensorInfo armnnTfParser::OutputShapeOfExpandDims ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 1467 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddBatchNormalizationLayer(), INetwork::AddComparisonLayer(), INetwork::AddGatherLayer(), INetwork::AddMinimumLayer(), INetwork::AddReshapeLayer(), INetwork::AddStackLayer(), INetwork::AddSubtractionLayer(), INetwork::AddTransposeLayer(), ARMNN_ASSERT, CHECK_DATA_FORMAT, CHECK_LOCATION, IOutputSlot::Connect(), TensorInfo::GetDataType(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, StackDescriptor::m_Axis, BatchNormalizationDescriptor::m_DataLayout, BatchNormalizationDescriptor::m_Eps, ActivationDescriptor::m_Function, WithOutputTensorIndex< T >::m_Index, WithOutputTensorIndex< T >::m_IndexedValue, StackDescriptor::m_InputShape, StackDescriptor::m_NumInputs, ReshapeDescriptor::m_TargetShape, armnn::numeric_cast(), TensorInfo::SetDataType(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnUtils::TransposeTensorShape().

1468 {
1469  ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
1470 
1471  if (inputTensorInfo.GetNumDimensions() > 4) {
1472  throw ParseException(
1473  boost::str(
1474  boost::format(
1475  "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1476  % inputTensorInfo.GetNumDimensions()
1477  % nodeDef.name()
1478  % CHECK_LOCATION().AsString()));
1479  }
1480 
1481  std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1482 
1483  std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1484  std::vector<uint32_t> outputDims;
1485 
1486  // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1487  if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1488  {
1489  // add current input shape to outputDims
1490  for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1491  auto currentDimension = inputTensorInfo.GetShape()[i];
1492  outputDims.push_back(currentDimension);
1493  }
1494 
1495  // insert a dimension of 1 at index 'expandDim' of inputs shape
1496  if (expandDim >= 0)
1497  {
1498  auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1499  outputDims.insert(getPosition, 1);
1500  }
1501 
1502  // if negative number for 'expandDim' then count backwards from the last element
1503  // and insert 1 dimension at index 'expandDim'
1504  if (expandDim < 0)
1505  {
1506  int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
1507  auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1508  outputDims.insert(getPosition, 1);
1509  }
1510  }
1511  else
1512  {
1514  boost::str(
1515  boost::format(
1516  "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1517  % expandDim
1518  % inputDimSize
1519  % CHECK_LOCATION().AsString()));
1520  }
1521 
1522  if (outputDims.size() > 4)
1523  {
1524  throw ParseException(
1525  boost::str(
1526  boost::format(
1527  "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1528  % outputDims.size()
1529  % nodeDef.name()
1530  % CHECK_LOCATION().AsString()));
1531  }
1532 
1533  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1534  outputDims.data());
1535 
1536  TensorInfo outTensorInfo = inputTensorInfo;
1537  outTensorInfo.SetShape(outShape);
1538 
1539  return outTensorInfo;
1540 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

◆ OutputShapeOfSqueeze()

TensorInfo armnnTfParser::OutputShapeOfSqueeze ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 2462 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddAdditionLayer(), INetwork::AddDivisionLayer(), INetwork::AddElementwiseUnaryLayer(), INetwork::AddFullyConnectedLayer(), INetwork::AddInputLayer(), INetwork::AddMaximumLayer(), INetwork::AddMeanLayer(), INetwork::AddMultiplicationLayer(), INetwork::AddNormalizationLayer(), INetwork::AddOutputLayer(), INetwork::AddPooling2dLayer(), INetwork::AddReshapeLayer(), INetwork::AddSoftmaxLayer(), INetwork::AddSplitterLayer(), INetwork::AddStridedSliceLayer(), ARMNN_ASSERT, CalcPadding(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), TensorInfo::GetDataType(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, NormalizationDescriptor::m_Alpha, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, NormalizationDescriptor::m_Beta, FullyConnectedDescriptor::m_BiasEnabled, Pooling2dDescriptor::m_DataLayout, NormalizationDescriptor::m_DataLayout, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, NormalizationDescriptor::m_K, MeanDescriptor::m_KeepDims, m_Layer, StridedSliceDescriptor::m_NewAxisMask, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, 
Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, ReshapeDescriptor::m_TargetShape, armnn::NHWC, armnn::numeric_cast(), TfParser::ParsedMatMulTfOperation, TfParser::ParsedMulTfOperation, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), and ViewsDescriptor::SetViewSize().

2463 {
2464  ARMNN_ASSERT(nodeDef.op() == "Squeeze");
2465  tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2466 
2467  DataType type;
2468  if (tfDataType == tensorflow::DT_FLOAT)
2469  {
2470  type = DataType::Float32;
2471  }
2472  else if (tfDataType == tensorflow::DT_INT32)
2473  {
2474  type = DataType::Signed32;
2475  }
2476  else
2477  {
2478  throw ParseException(
2479  boost::str(
2480  boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2481  % tensorflow::DataType_Name(tfDataType)
2482  % nodeDef.name()
2483  % CHECK_LOCATION().AsString()));
2484  }
2485 
2486 
2487  if (inputTensorInfo.GetNumDimensions() > 4)
2488  {
2489  throw ParseException(
2490  boost::str(
2491  boost::format(
2492  "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2493  % inputTensorInfo.GetNumDimensions()
2494  % nodeDef.name()
2495  % CHECK_LOCATION().AsString()));
2496  }
2497 
2498  std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
2499  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2500 
2501  if (squeezeDims.empty())
2502  {
2503  squeezeDims.assign(dimensionSequence,
2504  dimensionSequence+inputTensorInfo.GetNumDimensions());
2505  }
2506 
2507  std::vector<uint32_t> outputDims;
2508  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2509  {
2510  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2511  auto currentDimension = inputTensorInfo.GetShape()[i];
2512  if (skipSqueeze || currentDimension != 1)
2513  {
2514  outputDims.push_back(currentDimension);
2515  }
2516  }
2517 
2518  if (outputDims.size() > 4)
2519  {
2520  throw ParseException(
2521  boost::str(
2522  boost::format(
2523  "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2524  % outputDims.size()
2525  % nodeDef.name()
2526  % CHECK_LOCATION().AsString()));
2527  }
2528 
2529  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2530  outputDims.data());
2531 
2532  TensorInfo outTensorInfo = inputTensorInfo;
2533  outTensorInfo.SetShape(outShape);
2534  outTensorInfo.SetDataType(type);
2535 
2536  return outTensorInfo;
2537 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
DataType
Definition: Types.hpp:32
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92