ArmNN
 20.11
armnnTfParser Namespace Reference

Classes

class  ITfParser
 Parses a directed acyclic graph from a tensorflow protobuf file. More...
 
class  TfParser
 
struct  WithOutputTensorIndex
 WithOutputTensorIndex wraps a value and an index. More...
 

Typedefs

using BindingPointInfo = armnn::BindingPointInfo
 
using ITfParserPtr = std::unique_ptr< ITfParser, void(*)(ITfParser *parser)>
 
using ParsedTfOperationPtr = std::unique_ptr< ParsedTfOperation >
 
using OutputOfParsedTfOperation = WithOutputTensorIndex< ParsedTfOperation * >
 
using OutputOfConstNodeDef = WithOutputTensorIndex< const tensorflow::NodeDef * >
 
using OutputId = WithOutputTensorIndex< std::string >
 

Functions

void CalculateSamePadding (uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
 
void CalcPadding (uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
 
DataType ConvertTfTensorDataType (const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
 
TensorInfo OutputShapeOfExpandDims (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo, std::int32_t expandDim)
 
unsigned int CheckPaddingTensor (const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
 
TensorInfo CalculatePaddedOutputTensorInfo (const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
 
TensorInfo OutputShapeOfSqueeze (const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
 

Typedef Documentation

◆ BindingPointInfo

Definition at line 19 of file ITfParser.hpp.

◆ ITfParserPtr

using ITfParserPtr = std::unique_ptr<ITfParser, void(*)(ITfParser* parser)>

Definition at line 22 of file ITfParser.hpp.

◆ OutputId

using OutputId = WithOutputTensorIndex<std::string>

Definition at line 62 of file TfParser.hpp.

◆ OutputOfConstNodeDef

using OutputOfConstNodeDef = WithOutputTensorIndex<const tensorflow::NodeDef*>

Definition at line 61 of file TfParser.hpp.

◆ OutputOfParsedTfOperation

using OutputOfParsedTfOperation = WithOutputTensorIndex<ParsedTfOperation *>

Definition at line 60 of file TfParser.hpp.

◆ ParsedTfOperationPtr

using ParsedTfOperationPtr = std::unique_ptr<ParsedTfOperation>

Definition at line 35 of file TfParser.hpp.

Function Documentation

◆ CalcPadding()

void armnnTfParser::CalcPadding ( uint32_t  input,
uint32_t  kernel,
uint32_t  stride,
uint32_t &  outPadHead,
uint32_t &  outPadTail,
bool  samePadding 
)

Definition at line 406 of file TfParser.cpp.

References ARMNN_ASSERT, CalculateSamePadding(), CHECK_LOCATION, Layer::GetName(), Layer::GetNumOutputSlots(), Layer::GetOutputSlot(), and m_Layer.

Referenced by ConvertTfTensorDataType(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), and OutputShapeOfSqueeze().

/// Computes head/tail padding for one spatial dimension of a convolution or
/// pooling window, following TensorFlow's "SAME"/"VALID" scheme.
/// With samePadding == false ("VALID") both outputs are zero; with "SAME" the
/// total padding needed to keep output = ceil(input / stride) is split evenly,
/// any odd remainder going to the tail.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride,
                 uint32_t& outPadHead, uint32_t& outPadTail, bool samePadding)
{
    outPadHead = 0;
    outPadTail = 0;
    if (samePadding)
    {
        const uint32_t outputSize = (input + stride - 1) / stride; // ceil(input/stride)
        const uint32_t neededInput = (outputSize - 1) * stride + kernel;
        if (neededInput > input)
        {
            const uint32_t totalPadding = neededInput - input;
            outPadHead = totalPadding / 2;
            outPadTail = totalPadding - outPadHead; // odd remainder goes to the tail
        }
    }
}
void CalculateSamePadding(uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
Definition: TfParser.cpp:390

◆ CalculatePaddedOutputTensorInfo()

TensorInfo armnnTfParser::CalculatePaddedOutputTensorInfo ( const TensorInfo inputTensorInfo,
const std::vector< std::pair< unsigned int, unsigned int >> &  padList 
)

Definition at line 2126 of file TfParser.cpp.

References INetwork::AddConcatLayer(), INetwork::AddPadLayer(), INetwork::AddReshapeLayer(), INetwork::AddResizeLayer(), armnn::Bilinear, CHECK_LOCATION, CheckPaddingTensor(), IOutputSlot::Connect(), armnn::Float32, IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), OriginsDescriptor::GetViewOrigin(), armnn::IgnoreUnused(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ReshapeDescriptor::m_TargetShape, ResizeDescriptor::m_TargetWidth, armnn::NHWC, OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and OriginsDescriptor::SetViewOriginCoord().

2128 {
2129  unsigned int numDims = inputTensorInfo.GetNumDimensions();
2130  std::vector<unsigned int> outDims;
2131  for (unsigned int i = 0; i < numDims; ++i)
2132  {
2133  unsigned int dimSize = inputTensorInfo.GetShape()[i];
2134  const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2135  dimSize += dimPadding.first;
2136  dimSize += dimPadding.second;
2137  outDims.push_back(dimSize);
2138  }
2139  TensorInfo paddedTensorInfo = inputTensorInfo;
2140  unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2141  paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2142  return paddedTensorInfo;
2143 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ CalculateSamePadding()

void armnnTfParser::CalculateSamePadding ( uint32_t  inputSize,
uint32_t  stride,
uint32_t  filterSize,
bool  samePadding,
uint32_t *  paddingFront,
uint32_t *  paddingBack 
)
inline

Definition at line 390 of file TfParser.cpp.

Referenced by CalcPadding().

/// Computes TensorFlow-style padding for one dimension.
/// When samePadding is false ("VALID"), both paddings are zero. When true
/// ("SAME"), the output size is ceil(inputSize / stride) and the padding
/// required to reach it is split between front and back, with any odd
/// remainder assigned to the back.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack)
{
    *paddingFront = 0;
    *paddingBack = 0;
    if (!samePadding)
    {
        return; // "VALID" padding: nothing to add.
    }
    const uint32_t outputSize = (inputSize + stride - 1) / stride; // ceil(inputSize/stride)
    const uint32_t neededInput = (outputSize - 1) * stride + filterSize;
    if (neededInput > inputSize)
    {
        const uint32_t totalPadding = neededInput - inputSize;
        *paddingFront = totalPadding / 2;
        *paddingBack = totalPadding - *paddingFront;
    }
}

◆ CheckPaddingTensor()

unsigned int armnnTfParser::CheckPaddingTensor ( const ConstTensor paddingTensor,
const TensorInfo inputTensorInfo,
const std::string &  nodeName 
)

Definition at line 2097 of file TfParser.cpp.

References CHECK_LOCATION, TensorInfo::GetNumDimensions(), and BaseTensor< MemoryType >::GetShape().

Referenced by CalculatePaddedOutputTensorInfo().

2100 {
2101  unsigned int rank = paddingTensor.GetShape()[0];
2102  unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2103  if (rank != expectedRank)
2104  {
2105  throw ParseException(
2106  fmt::format("Expected the padding tensor to be of rank {} not {} on Node {} {}.",
2107  expectedRank,
2108  rank,
2109  nodeName,
2110  CHECK_LOCATION().AsString()));
2111  }
2112  unsigned int second = paddingTensor.GetShape()[1];
2113  if (second != 2)
2114  {
2115  throw ParseException(
2116  fmt::format("Expected the padding tensor to be of dimensions "
2117  "[{1}, 2] not [{1}, {2}] on Node {3} {4}.",
2118  rank,
2119  second,
2120  nodeName,
2121  CHECK_LOCATION().AsString()));
2122  }
2123  return rank;
2124 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:284
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ConvertTfTensorDataType()

DataType armnnTfParser::ConvertTfTensorDataType ( const tensorflow::DataType  tfDataType,
const tensorflow::NodeDef &  nodeDef 
)

Definition at line 903 of file TfParser.cpp.

References INetwork::AddConvolution2dLayer(), INetwork::AddDepthwiseConvolution2dLayer(), ARMNN_ASSERT, CalcPadding(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), armnn::GetDataTypeSize(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, armnnUtils::Permute(), armnnUtils::Permuted(), and IOutputSlot::SetTensorInfo().

905 {
906  switch (tfDataType)
907  {
908  case tensorflow::DT_FLOAT:
909  return DataType::Float32;
910  break;
911  case tensorflow::DT_INT32:
912  return DataType::Signed32;
913  break;
914  default:
915  throw ParseException(
916  fmt::format("Unknown DataType {} for node {} {}",
917  tensorflow::DataType_Name(tfDataType),
918  nodeDef.name(),
919  CHECK_LOCATION().AsString()));
920  }
921 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ OutputShapeOfExpandDims()

TensorInfo armnnTfParser::OutputShapeOfExpandDims ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo,
std::int32_t  expandDim 
)

Definition at line 1420 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddBatchNormalizationLayer(), INetwork::AddComparisonLayer(), INetwork::AddGatherLayer(), INetwork::AddMinimumLayer(), INetwork::AddReshapeLayer(), INetwork::AddStackLayer(), INetwork::AddSubtractionLayer(), INetwork::AddTransposeLayer(), ARMNN_ASSERT, CHECK_DATA_FORMAT, CHECK_LOCATION, IOutputSlot::Connect(), TensorInfo::GetDataType(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, GatherDescriptor::m_Axis, StackDescriptor::m_Axis, BatchNormalizationDescriptor::m_DataLayout, BatchNormalizationDescriptor::m_Eps, ActivationDescriptor::m_Function, WithOutputTensorIndex< T >::m_Index, WithOutputTensorIndex< T >::m_IndexedValue, StackDescriptor::m_InputShape, StackDescriptor::m_NumInputs, ReshapeDescriptor::m_TargetShape, armnn::numeric_cast(), TensorInfo::SetDataType(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnn::Signed32, and armnnUtils::TransposeTensorShape().

1423 {
1424  ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
1425 
1426  if (inputTensorInfo.GetNumDimensions() > 4) {
1427  throw ParseException(
1428  fmt::format("Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
1429  inputTensorInfo.GetNumDimensions(),
1430  nodeDef.name(),
1431  CHECK_LOCATION().AsString()));
1432  }
1433 
1434  std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1435  std::vector<uint32_t> outputDims;
1436 
1437  // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1438  if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1439  {
1440  // add current input shape to outputDims
1441  for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1442  auto currentDimension = inputTensorInfo.GetShape()[i];
1443  outputDims.push_back(currentDimension);
1444  }
1445 
1446  // insert a dimension of 1 at index 'expandDim' of inputs shape
1447  if (expandDim >= 0)
1448  {
1449  auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1450  outputDims.insert(getPosition, 1);
1451  }
1452 
1453  // if negative number for 'expandDim' then count backwards from the last element
1454  // and insert 1 dimension at index 'expandDim'
1455  if (expandDim < 0)
1456  {
1457  int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
1458  auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1459  outputDims.insert(getPosition, 1);
1460  }
1461  }
1462  else
1463  {
1465  fmt::format("Cannot expand dimension {} in input tensor with {} dimension {}",
1466  expandDim,
1467  inputDimSize,
1468  CHECK_LOCATION().AsString()));
1469  }
1470 
1471  if (outputDims.size() > 4)
1472  {
1473  throw ParseException(
1474  fmt::format("Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
1475  outputDims.size(),
1476  nodeDef.name(),
1477  CHECK_LOCATION().AsString()));
1478  }
1479 
1480  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1481  outputDims.data());
1482 
1483  TensorInfo outTensorInfo = inputTensorInfo;
1484  outTensorInfo.SetShape(outShape);
1485 
1486  return outTensorInfo;
1487 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ OutputShapeOfSqueeze()

TensorInfo armnnTfParser::OutputShapeOfSqueeze ( const tensorflow::NodeDef &  nodeDef,
TensorInfo  inputTensorInfo 
)

Definition at line 2435 of file TfParser.cpp.

References INetwork::AddActivationLayer(), INetwork::AddAdditionLayer(), INetwork::AddDivisionLayer(), INetwork::AddElementwiseUnaryLayer(), INetwork::AddFullyConnectedLayer(), INetwork::AddInputLayer(), INetwork::AddMaximumLayer(), INetwork::AddMeanLayer(), INetwork::AddMultiplicationLayer(), INetwork::AddNormalizationLayer(), INetwork::AddOutputLayer(), INetwork::AddPooling2dLayer(), INetwork::AddReshapeLayer(), INetwork::AddSoftmaxLayer(), INetwork::AddSplitterLayer(), INetwork::AddStridedSliceLayer(), ARMNN_ASSERT, CalcPadding(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), TensorInfo::GetDataType(), DataLayoutIndexed::GetHeightIndex(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, NormalizationDescriptor::m_Alpha, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, NormalizationDescriptor::m_Beta, FullyConnectedDescriptor::m_BiasEnabled, Pooling2dDescriptor::m_DataLayout, NormalizationDescriptor::m_DataLayout, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, NormalizationDescriptor::m_K, MeanDescriptor::m_KeepDims, m_Layer, StridedSliceDescriptor::m_NewAxisMask, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, 
Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, ReshapeDescriptor::m_TargetShape, armnn::NHWC, armnn::numeric_cast(), TfParser::ParsedMatMulTfOperation, TfParser::ParsedMulTfOperation, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), and ViewsDescriptor::SetViewSize().

2436 {
2437  ARMNN_ASSERT(nodeDef.op() == "Squeeze");
2438  tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2439 
2440  DataType type;
2441  if (tfDataType == tensorflow::DT_FLOAT)
2442  {
2443  type = DataType::Float32;
2444  }
2445  else if (tfDataType == tensorflow::DT_INT32)
2446  {
2447  type = DataType::Signed32;
2448  }
2449  else
2450  {
2451  throw ParseException(
2452  fmt::format("Unsupported DataType {} for Squeeze operation {} {}",
2453  tensorflow::DataType_Name(tfDataType),
2454  nodeDef.name(),
2455  CHECK_LOCATION().AsString()));
2456  }
2457 
2458 
2459  if (inputTensorInfo.GetNumDimensions() > 4)
2460  {
2461  throw ParseException(
2462  fmt::format("Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
2463  inputTensorInfo.GetNumDimensions(),
2464  nodeDef.name(),
2465  CHECK_LOCATION().AsString()));
2466  }
2467 
2468  std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
2469  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2470 
2471  if (squeezeDims.empty())
2472  {
2473  squeezeDims.assign(dimensionSequence,
2474  dimensionSequence+inputTensorInfo.GetNumDimensions());
2475  }
2476 
2477  std::vector<uint32_t> outputDims;
2478  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2479  {
2480  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2481  auto currentDimension = inputTensorInfo.GetShape()[i];
2482  if (skipSqueeze || currentDimension != 1)
2483  {
2484  outputDims.push_back(currentDimension);
2485  }
2486  }
2487 
2488  if (outputDims.size() > 4)
2489  {
2490  throw ParseException(
2491  fmt::format("Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
2492  outputDims.size(),
2493  nodeDef.name(),
2494  CHECK_LOCATION().AsString()));
2495  }
2496 
2497  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2498  outputDims.data());
2499 
2500  TensorInfo outTensorInfo = inputTensorInfo;
2501  outTensorInfo.SetShape(outShape);
2502  outTensorInfo.SetDataType(type);
2503 
2504  return outTensorInfo;
2505 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
DataType
Definition: Types.hpp:32
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191