ArmNN  NotReleased
armnnTfParser Namespace Reference

Classes

class  ITfParser
 Parses a directed acyclic graph from a TensorFlow protobuf file. More...
 
class  TfParser
 
struct  WithOutputTensorIndex
 

Typedefs

using BindingPointInfo = armnn::BindingPointInfo
 
using ITfParserPtr = std::unique_ptr< ITfParser, void(*)(ITfParser *parser)>
 
using ParsedTfOperationPtr = std::unique_ptr< ParsedTfOperation >
 
using OutputOfParsedTfOperation = WithOutputTensorIndex< ParsedTfOperation * >
 
using OutputOfConstNodeDef = WithOutputTensorIndex< const tensorflow::NodeDef * >
 
using OutputId = WithOutputTensorIndex< std::string >
 

Functions

void CalculateSamePadding (uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
 
void CalcPadding (uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
 
DataType ConvertTfTensorDataType (const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
 
TensorInfo OutputShapeOfExpandDims (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 
unsigned int CheckPaddingTensor (const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
 
TensorInfo CalculatePaddedOutputTensorInfo (const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
 
TensorInfo OutputShapeOfSqueeze (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 

Typedef Documentation

◆ BindingPointInfo

Definition at line 19 of file ITfParser.hpp.

◆ ITfParserPtr

using ITfParserPtr = std::unique_ptr<ITfParser, void(*)(ITfParser* parser)>

Definition at line 22 of file ITfParser.hpp.

◆ OutputId

using OutputId = WithOutputTensorIndex<std::string>

Definition at line 62 of file TfParser.hpp.

◆ OutputOfConstNodeDef

using OutputOfConstNodeDef = WithOutputTensorIndex<const tensorflow::NodeDef*>

Definition at line 61 of file TfParser.hpp.

◆ OutputOfParsedTfOperation

using OutputOfParsedTfOperation = WithOutputTensorIndex<ParsedTfOperation *>

Definition at line 60 of file TfParser.hpp.

◆ ParsedTfOperationPtr

using ParsedTfOperationPtr = std::unique_ptr<ParsedTfOperation>

Definition at line 35 of file TfParser.hpp.

Function Documentation

◆ CalcPadding()

void armnnTfParser::CalcPadding ( uint32_t  input,
uint32_t  kernel,
uint32_t  stride,
uint32_t &  outPadHead,
uint32_t &  outPadTail,
bool  samePadding 
)

Definition at line 419 of file TfParser.cpp.

References CalculateSamePadding(), CHECK_LOCATION, Layer::GetName(), Layer::GetNumOutputSlots(), Layer::GetOutputSlot(), and m_Layer.

Referenced by ConvertTfTensorDataType(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), and OutputShapeOfSqueeze().

421 {
422  CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
423 }
void CalculateSamePadding(uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
Definition: TfParser.cpp:403

◆ CalculatePaddedOutputTensorInfo()

TensorInfo armnnTfParser::CalculatePaddedOutputTensorInfo ( const TensorInfo inputTensorInfo,
const std::vector< std::pair< unsigned int, unsigned int >> &  padList 
)

Definition at line 2089 of file TfParser.cpp.

References INetwork::AddConcatLayer(), INetwork::AddPadLayer(), INetwork::AddReshapeLayer(), INetwork::AddResizeLayer(), armnn::Bilinear, CHECK_LOCATION, CheckPaddingTensor(), IOutputSlot::Connect(), armnn::Float32, IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), OriginsDescriptor::GetViewOrigin(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ReshapeDescriptor::m_TargetShape, ResizeDescriptor::m_TargetWidth, armnn::NHWC, OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and OriginsDescriptor::SetViewOriginCoord().

2091 {
2092  unsigned int numDims = inputTensorInfo.GetNumDimensions();
2093  std::vector<unsigned int> outDims;
2094  for (unsigned int i = 0; i < numDims; ++i)
2095  {
2096  unsigned int dimSize = inputTensorInfo.GetShape()[i];
2097  const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2098  dimSize += dimPadding.first;
2099  dimSize += dimPadding.second;
2100  outDims.push_back(dimSize);
2101  }
2102  TensorInfo paddedTensorInfo = inputTensorInfo;
2103  unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2104  paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2105  return paddedTensorInfo;
2106 }
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
const TensorShape & GetShape() const
Definition: Tensor.hpp:88

◆ CalculateSamePadding()

void armnnTfParser::CalculateSamePadding ( uint32_t  inputSize,
uint32_t  stride,
uint32_t  filterSize,
bool  samePadding,
uint32_t *  paddingFront,
uint32_t *  paddingBack 
)
inline

Definition at line 403 of file TfParser.cpp.

Referenced by CalcPadding().

405  {
406  *paddingFront = 0;
407  *paddingBack = 0;
408 
409  if (samePadding) {
410  uint32_t outputSize = (inputSize + stride - 1) / stride;
411  uint32_t temp = (outputSize - 1) * stride + filterSize;
412  if (temp > inputSize) {
413  *paddingFront = (temp - inputSize) / 2;
414  *paddingBack = (temp - inputSize) - *paddingFront;
415  }
416  }
417 }

◆ CheckPaddingTensor()

unsigned int armnnTfParser::CheckPaddingTensor ( const ConstTensor paddingTensor,
const TensorInfo inputTensorInfo,
const std::string &  nodeName 
)

Definition at line 2057 of file TfParser.cpp.

References CHECK_LOCATION, TensorInfo::GetNumDimensions(), and BaseTensor< MemoryType >::GetShape().

Referenced by CalculatePaddedOutputTensorInfo().

2060 {
2061  unsigned int rank = paddingTensor.GetShape()[0];
2062  unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2063  if (rank != expectedRank)
2064  {
2065  throw ParseException(
2066  boost::str(
2067  boost::format(
 "Expected the padding tensor to be of rank %1% not %2% on Node %3% %4%.")
2069  % expectedRank
2070  % rank
2071  % nodeName
2072  % CHECK_LOCATION().AsString()));
2073  }
2074  unsigned int second = paddingTensor.GetShape()[1];
2075  if (second != 2)
2076  {
2077  throw ParseException(
2078  boost::str(
2079  boost::format(
 "Expected the padding tensor to be of dimensions [%1%, 2] not [%1%, %2%] on Node %3% %4%.")
2081  % rank
2082  % second
2083  % nodeName
2084  % CHECK_LOCATION().AsString()));
2085  }
2086  return rank;
2087 }
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
const TensorShape & GetShape() const
Definition: Tensor.hpp:169
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169

◆ ConvertTfTensorDataType()

DataType armnnTfParser::ConvertTfTensorDataType ( const tensorflow::DataType  tfDataType,
const tensorflow::NodeDef &  nodeDef 
)

Definition at line 931 of file TfParser.cpp.

References INetwork::AddConvolution2dLayer(), INetwork::AddDepthwiseConvolution2dLayer(), CalcPadding(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), armnn::GetDataTypeSize(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, armnnUtils::Permute(), armnnUtils::Permuted(), and IOutputSlot::SetTensorInfo().

933 {
934  switch (tfDataType)
935  {
936  case tensorflow::DT_FLOAT:
937  return DataType::Float32;
938  break;
939  case tensorflow::DT_INT32:
940  return DataType::Signed32;
941  break;
942  default:
943  throw ParseException(
944  boost::str(
945  boost::format(
946  "Unknown DataType %1% for node %2% %3%")
947  % tensorflow::DataType_Name(tfDataType)
948  % nodeDef.name()
949  % CHECK_LOCATION().AsString()));
950  }
951 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169

◆ OutputShapeOfExpandDims()

TensorInfo armnnTfParser::OutputShapeOfExpandDims ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 1466 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddBatchNormalizationLayer(), INetwork::AddComparisonLayer(), INetwork::AddGatherLayer(), INetwork::AddMinimumLayer(), INetwork::AddReshapeLayer(), INetwork::AddStackLayer(), INetwork::AddSubtractionLayer(), CHECK_DATA_FORMAT, CHECK_LOCATION, IOutputSlot::Connect(), TensorInfo::GetDataType(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ActivationDescriptor::m_A, StackDescriptor::m_Axis, BatchNormalizationDescriptor::m_DataLayout, BatchNormalizationDescriptor::m_Eps, ActivationDescriptor::m_Function, WithOutputTensorIndex< T >::m_Index, WithOutputTensorIndex< T >::m_IndexedValue, StackDescriptor::m_InputShape, StackDescriptor::m_NumInputs, ReshapeDescriptor::m_TargetShape, TensorInfo::SetDataType(), TensorInfo::SetShape(), and IOutputSlot::SetTensorInfo().

1467 {
1468  BOOST_ASSERT(nodeDef.op() == "ExpandDims");
1469 
1470  if (inputTensorInfo.GetNumDimensions() > 4) {
1471  throw ParseException(
1472  boost::str(
1473  boost::format(
1474  "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1475  % inputTensorInfo.GetNumDimensions()
1476  % nodeDef.name()
1477  % CHECK_LOCATION().AsString()));
1478  }
1479 
1480  std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1481 
1482  std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1483  std::vector<uint32_t> outputDims;
1484 
1485  // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1486  if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1487  {
1488  // add current input shape to outputDims
1489  for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1490  auto currentDimension = inputTensorInfo.GetShape()[i];
1491  outputDims.push_back(currentDimension);
1492  }
1493 
1494  // insert a dimension of 1 at index 'expandDim' of inputs shape
1495  if (expandDim >= 0)
1496  {
1497  auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1498  outputDims.insert(getPosition, 1);
1499  }
1500 
1501  // if negative number for 'expandDim' then count backwards from the last element
1502  // and insert 1 dimension at index 'expandDim'
1503  if (expandDim < 0)
1504  {
1505  int outputDimSize = boost::numeric_cast<int>(outputDims.size() + 1);
1506  auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1507  outputDims.insert(getPosition, 1);
1508  }
1509  }
1510  else
1511  {
1512  throw ParseException(
1513  boost::str(
1514  boost::format(
1515  "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1516  % expandDim
1517  % inputDimSize
1518  % CHECK_LOCATION().AsString()));
1519  }
1520 
1521  if (outputDims.size() > 4)
1522  {
1523  throw ParseException(
1524  boost::str(
1525  boost::format(
1526  "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1527  % outputDims.size()
1528  % nodeDef.name()
1529  % CHECK_LOCATION().AsString()));
1530  }
1531 
1532  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1533  outputDims.data());
1534 
1535  TensorInfo outTensorInfo = inputTensorInfo;
1536  outTensorInfo.SetShape(outShape);
1537 
1538  return outTensorInfo;
1539 }
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
const TensorShape & GetShape() const
Definition: Tensor.hpp:88

◆ OutputShapeOfSqueeze()

TensorInfo armnnTfParser::OutputShapeOfSqueeze ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 2414 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddAdditionLayer(), INetwork::AddDivisionLayer(), INetwork::AddElementwiseUnaryLayer(), INetwork::AddFullyConnectedLayer(), INetwork::AddInputLayer(), INetwork::AddMaximumLayer(), INetwork::AddMeanLayer(), INetwork::AddMultiplicationLayer(), INetwork::AddNormalizationLayer(), INetwork::AddOutputLayer(), INetwork::AddPooling2dLayer(), INetwork::AddReshapeLayer(), INetwork::AddSoftmaxLayer(), INetwork::AddSplitterLayer(), INetwork::AddStridedSliceLayer(), CalcPadding(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), TensorInfo::GetDataType(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), ActivationDescriptor::m_A, NormalizationDescriptor::m_Alpha, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, NormalizationDescriptor::m_Beta, FullyConnectedDescriptor::m_BiasEnabled, Pooling2dDescriptor::m_DataLayout, NormalizationDescriptor::m_DataLayout, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, NormalizationDescriptor::m_K, MeanDescriptor::m_KeepDims, m_Layer, StridedSliceDescriptor::m_NewAxisMask, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, 
Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, ReshapeDescriptor::m_TargetShape, armnn::NHWC, TfParser::ParsedMatMulTfOperation, TfParser::ParsedMulTfOperation, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), and ViewsDescriptor::SetViewSize().

2415 {
2416  BOOST_ASSERT(nodeDef.op() == "Squeeze");
2417  tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2418 
2419  DataType type;
2420  if (tfDataType == tensorflow::DT_FLOAT)
2421  {
2422  type = DataType::Float32;
2423  }
2424  else if (tfDataType == tensorflow::DT_INT32)
2425  {
2426  type = DataType::Signed32;
2427  }
2428  else
2429  {
2430  throw ParseException(
2431  boost::str(
2432  boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
2433  % tensorflow::DataType_Name(tfDataType)
2434  % nodeDef.name()
2435  % CHECK_LOCATION().AsString()));
2436  }
2437 
2438 
2439  if (inputTensorInfo.GetNumDimensions() > 4)
2440  {
2441  throw ParseException(
2442  boost::str(
2443  boost::format(
2444  "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2445  % inputTensorInfo.GetNumDimensions()
2446  % nodeDef.name()
2447  % CHECK_LOCATION().AsString()));
2448  }
2449 
2450  std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
2451  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2452 
2453  if (squeezeDims.empty())
2454  {
2455  squeezeDims.assign(dimensionSequence,
2456  dimensionSequence+inputTensorInfo.GetNumDimensions());
2457  }
2458 
2459  std::vector<uint32_t> outputDims;
2460  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2461  {
2462  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2463  auto currentDimension = inputTensorInfo.GetShape()[i];
2464  if (skipSqueeze || currentDimension != 1)
2465  {
2466  outputDims.push_back(currentDimension);
2467  }
2468  }
2469 
2470  if (outputDims.size() > 4)
2471  {
2472  throw ParseException(
2473  boost::str(
2474  boost::format(
2475  "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2476  % outputDims.size()
2477  % nodeDef.name()
2478  % CHECK_LOCATION().AsString()));
2479  }
2480 
2481  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2482  outputDims.data());
2483 
2484  TensorInfo outTensorInfo = inputTensorInfo;
2485  outTensorInfo.SetShape(outShape);
2486  outTensorInfo.SetDataType(type);
2487 
2488  return outTensorInfo;
2489 }
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
DataType
Definition: Types.hpp:32
const TensorShape & GetShape() const
Definition: Tensor.hpp:88