ArmNN
 21.02
ITfParser::TfParserImpl Struct Reference

#include <TfParser.hpp>

Public Types

using OperationParsingFunction = ParsedTfOperationPtr(TfParserImpl::*)(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromTextFile (const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Creates the network from a protobuf text file on the disk. More...
 
armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Creates the network from a protobuf binary file on the disk. More...
 
armnn::INetworkPtr CreateNetworkFromString (const char *protoText, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Creates the network directly from protobuf text in a string. Useful for debugging/testing. More...
 
BindingPointInfo GetNetworkInputBindingInfo (const std::string &name) const
 Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name. More...
 
BindingPointInfo GetNetworkOutputBindingInfo (const std::string &name) const
 Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name. More...
 
 TfParserImpl ()
 
 ~TfParserImpl ()=default
 
 TfParserImpl (const TfParserImpl &)=delete
 
TfParserImpl & operator= (const TfParserImpl &)=delete
 
armnn::INetworkPtr CreateNetworkFromGraphDef (const tensorflow::GraphDef &graphDef, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Parses a GraphDef loaded into memory from one of the other CreateNetwork*. More...
 
void LoadGraphDef (const tensorflow::GraphDef &graphDef)
 Sets up variables and then performs BFS to parse all nodes. More...
 
void LoadNodeDef (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 Parses a given node, assuming nodes before it in the graph have been done. More...
 
const tensorflow::NodeDef * ResolveIdentityNode (const tensorflow::NodeDef *nodeDef)
 Handling identity layers as the input for Conv2D layer. More...
 
std::vector< OutputOfConstNodeDef > GetTfInputNodes (const tensorflow::NodeDef &nodeDef) const
 Finds the nodes connected as inputs of the given node in the graph. More...
 
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked (const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
 Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph, and throws an exception if the number of inputs does not match the expected one. More...
 
ParsedTfOperationPtr ParseConst (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
template<typename Type >
bool HasParsedConstTensor (const std::string &nodeName) const
 Checks if there is a pre-parsed const tensor available with the given name and Type. More...
 
template<typename Type >
bool HasParsedConstTensor (ParsedTfOperation *parsedTfOpPtr) const
 
unsigned int GetConstInputIndex (const std::vector< OutputOfParsedTfOperation > &inputs)
 
ParsedTfOperationPtr ParseAdd (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseAddN (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseBiasAdd (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseConv2D (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseDepthwiseConv2D (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseExpandDims (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseFusedBatchNorm (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseConcat (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseIdentity (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseLrn (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseMatMul (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseMean (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseMul (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParsePlaceholder (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseRealDiv (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseRelu (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseRelu6 (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseReshape (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseResizeBilinear (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseRsqrt (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseShape (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseSqueeze (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseSigmoid (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseSoftmax (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseSoftplus (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseSplit (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseStridedSlice (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseTanh (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseMaxPool (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseAvgPool (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParsePooling2d (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef, armnn::PoolingAlgorithm pooltype)
 
ParsedTfOperationPtr ParseEqual (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseMaximum (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseMinimum (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseGather (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseGreater (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParsePad (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseSub (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseStack (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr ParseTranspose (const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
 
ParsedTfOperationPtr AddActivationLayer (const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
 
ParsedTfOperationPtr AddAdditionLayer (const tensorflow::NodeDef &nodeDef, bool isBiasAdd=false)
 
ParsedTfOperationPtr AddRealDivLayer (const tensorflow::NodeDef &nodeDef)
 
ParsedTfOperationPtr AddMaximumLayer (const tensorflow::NodeDef &nodeDef)
 
armnn::IConnectableLayer * AddMultiplicationLayer (const tensorflow::NodeDef &nodeDef)
 
armnn::IConnectableLayer * AddFullyConnectedLayer (const tensorflow::NodeDef &matMulNodeDef, const tensorflow::NodeDef *addNodeDef, const char *armnnLayerName)
 
bool IsSupportedLeakyReluPattern (const tensorflow::NodeDef &mulNodeDef, size_t alphaLayerIndex, const OutputOfParsedTfOperation &otherOp, armnn::IOutputSlot **outputOfLeakyRelu, armnn::ActivationDescriptor &desc)
 
std::pair< armnn::IOutputSlot *, armnn::IOutputSlot * > ProcessElementwiseInputSlots (const tensorflow::NodeDef &nodeDef, const std::string &layerName)
 
ParsedTfOperationPtr ProcessComparisonLayer (armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
 
ParsedTfOperationPtr ProcessElementwiseLayer (armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
 
armnn::IConnectableLayer * CreateAdditionLayer (const tensorflow::NodeDef &nodeDef, armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, const std::string &layerName)
 
armnn::IConnectableLayer * CreateAdditionLayer (const tensorflow::NodeDef &nodeDef, const OutputOfParsedTfOperation &opOne, const OutputOfParsedTfOperation &opTwo, unsigned int numberOfAddition)
 
armnn::IConnectableLayer * CreateAdditionLayer (const tensorflow::NodeDef &nodeDef, armnn::IConnectableLayer *layerOne, armnn::IConnectableLayer *layerTwo, unsigned int numberOfAddition, unsigned long numberOfLayersToConnect, bool isOdd)
 
armnn::IConnectableLayer * CreateAdditionLayer (const tensorflow::NodeDef &nodeDef, const OutputOfParsedTfOperation &op, armnn::IConnectableLayer *layer)
 
void TrackInputBinding (armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
 
void TrackOutputBinding (armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
 
void Cleanup ()
 

Static Public Member Functions

static const std::string GetVersion ()
 Retrieve version in X.Y.Z form. More...
 
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo (const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
 
static void TrackBindingPoint (armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
 

Public Attributes

armnn::INetworkPtr m_Network
 The network we're building. Gets cleared after it is passed to the user. More...
 
std::map< std::string, armnn::TensorShape > m_InputShapes
 
std::vector< std::string > m_RequestedOutputs
 
std::unordered_map< std::string, const tensorflow::NodeDef * > m_NodesByName
 Map of nodes extracted from the GraphDef to speed up parsing. More...
 
std::unordered_map< std::string, ParsedTfOperationPtr > m_ParsedTfOperations
 
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
 Maps input layer names to their corresponding ids and tensor info. More...
 
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
 Maps output layer names to their corresponding ids and tensor info. More...
 

Static Public Attributes

static const std::map< std::string, OperationParsingFunction > ms_OperationNameToParsingFunctions
 Map of TensorFlow operation names to parsing member functions. More...
 
static const std::list< std::string > m_ControlInputs
 

Detailed Description

Definition at line 64 of file TfParser.hpp.

Member Typedef Documentation

◆ OperationParsingFunction

using OperationParsingFunction = ParsedTfOperationPtr(TfParserImpl::*)(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)

Definition at line 254 of file TfParser.hpp.

Constructor & Destructor Documentation

◆ TfParserImpl() [1/2]

Definition at line 540 of file TfParser.cpp.

541  : m_Network(nullptr, nullptr)
542 {
543 }
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251

◆ ~TfParserImpl()

~TfParserImpl ( )
default

◆ TfParserImpl() [2/2]

TfParserImpl ( const TfParserImpl & )
delete

Member Function Documentation

◆ AddActivationLayer()

ParsedTfOperationPtr AddActivationLayer ( const tensorflow::NodeDef &  nodeDef,
armnn::ActivationDescriptor &  desc 
)

Definition at line 2984 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseRelu(), ITfParser::TfParserImpl::ParseRelu6(), ITfParser::TfParserImpl::ParseSigmoid(), ITfParser::TfParserImpl::ParseSoftplus(), and ITfParser::TfParserImpl::ParseTanh().

2986 {
2987  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2988 
2989  IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2990 
2991  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2992  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2993  layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2994  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2995 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0

◆ AddAdditionLayer()

ParsedTfOperationPtr AddAdditionLayer ( const tensorflow::NodeDef &  nodeDef,
bool  isBiasAdd = false 
)

Definition at line 3118 of file TfParser.cpp.

References CHECK_DATA_FORMAT, CHECK_LOCATION, IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::m_Network, TensorInfo::SetShape(), and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseAdd(), ITfParser::TfParserImpl::ParseAddN(), and ITfParser::TfParserImpl::ParseBiasAdd().

3119 {
3120  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3121 
3122  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3123  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3124 
3125  const TensorInfo& input0Info = input0Slot->GetTensorInfo();
3126  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
3127 
3128  if (isBiasAdd)
3129  {
3130  // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
3131  // with the same data in the correct dimension for broadcast in addition.
3132  if(input1Info.GetNumDimensions() != 1)
3133  {
3134  throw ParseException(
3135  fmt::format("Unsupported bias for BiasAdd. It should be a 1D vector. "
3136  "Got {} dimensions for input {}. Node {} {}",
3137  input1Info.GetNumDimensions(),
3138  inputs[1].m_IndexedValue->GetNode().name(),
3139  nodeDef.name(),
3140  CHECK_LOCATION().AsString()));
3141  }
3142 
3143  const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
3144 
3145  CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
3146  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
3147  }
3148  else
3149  {
3150  if (input0Info.GetNumDimensions() == 1)
3151  {
3152  const bool isNHWC = true;
3153  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3154  }
3155 
3156  if (input1Info.GetNumDimensions() == 1)
3157  {
3158  const bool isNHWC = true;
3159  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3160  }
3161  }
3162 
3163  IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
3164 
3165  input0Slot->Connect(layer->GetInputSlot(0));
3166  input1Slot->Connect(layer->GetInputSlot(1));
3167 
3168  if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
3169  {
3170  const TensorShape& input0Shape = input0Info.GetShape();
3171  const TensorShape& input1Shape = input1Info.GetShape();
3172 
3173  std::vector<unsigned int> outputShape;
3174  outputShape.reserve(input0Shape.GetNumDimensions());
3175  TensorInfo outputInfo(input0Info);
3176 
3177  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3178  {
3179  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3180  }
3181 
3182  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3183 
3184  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3185  }
3186  else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
3187  {
3188  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3189  }
3190  else
3191  {
3192  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3193  }
3194 
3195  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3196 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
Definition: TfParser.cpp:356
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ AddFullyConnectedLayer()

IConnectableLayer * AddFullyConnectedLayer ( const tensorflow::NodeDef &  matMulNodeDef,
const tensorflow::NodeDef *  addNodeDef,
const char *  armnnLayerName 
)

Definition at line 3315 of file TfParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), BaseTensor< MemoryType >::GetShape(), FullyConnectedDescriptor::m_BiasEnabled, ITfParser::TfParserImpl::m_Network, and ITfParser::ParsedTfOperation.

Referenced by ITfParser::TfParserImpl::ParseAdd().

3317 {
3318  // Finds bias const (if applicable).
3319  ParsedConstTfOperation<float>* biasNode = nullptr;
3320  if (addNodeDef != nullptr)
3321  {
3322  std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
3323  // Finds our inputs.
3324  if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3325  {
3326  biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3327  }
3328  else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3329  {
3330  biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3331  }
3332  else
3333  {
3334  throw ParseException(
3335  fmt::format("ArmNN only supports fully connected layers with constant bias. "
3336  "Inputs {} and {}. AddNode {}. MatMulNode {} {}",
3337  addInputs[0].m_IndexedValue->GetNode().name(),
3338  addInputs[1].m_IndexedValue->GetNode().name(),
3339  addNodeDef->name(),
3340  matMulNodeDef.name(),
3341  CHECK_LOCATION().AsString()));
3342  }
3343  }
3344 
3345  // Finds matmul inputs.
3346  ParsedConstTfOperation<float>* weightNode = nullptr;
3347  ParsedTfOperation* inputNode = nullptr;
3348  unsigned int inputIdx = 0;
3349  std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3350  if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3351  {
3352  weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3353  inputNode = mulInputs[1].m_IndexedValue;
3354  inputIdx = mulInputs[1].m_Index;
3355  }
3356  else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3357  {
3358  weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3359  inputNode = mulInputs[0].m_IndexedValue;
3360  inputIdx = mulInputs[0].m_Index;
3361  }
3362  else
3363  {
3364  throw ParseException(
3365  fmt::format("ArmNN only supports fully connected layers with constant weights. "
3366  "Inputs {} and {}. MatMulNode {} {}",
3367  mulInputs[0].m_IndexedValue->GetNode().name(),
3368  mulInputs[1].m_IndexedValue->GetNode().name(),
3369  matMulNodeDef.name(),
3370  CHECK_LOCATION().AsString()));
3371  }
3372 
3373  std::vector<float> weightTensorData;
3374  // Handles weight.
3375  ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
3376 
3377  FullyConnectedDescriptor desc;
3378  desc.m_BiasEnabled = addNodeDef != nullptr;
3379 
3380  IConnectableLayer* layer = nullptr;
3381  Optional<ConstTensor> optionalBiases;
3382  std::vector<float> biasTensorData;
3383  // Makes the layer.
3384  if (addNodeDef != nullptr)
3385  {
3386  ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
3387 
3388  if (weights.GetShape()[1] != biases.GetShape()[0])
3389  {
3390  throw ParseException(
3391  fmt::format("Shape of matmul weights and bias do not match. "
3392  "AddNode {}. MatMulNode {} {}",
3393  addNodeDef->name(),
3394  matMulNodeDef.name(),
3395  CHECK_LOCATION().AsString()));
3396  }
3397 
3398  optionalBiases = Optional<ConstTensor>(biases);
3399  }
3400  layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
3401 
3402  ARMNN_ASSERT(layer != nullptr);
3403 
3404  inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3405  unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3406 
3407  // Handles output.
3408  TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3409  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3410  return layer;
3411 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:284
friend class ParsedTfOperation
Definition: ITfParser.hpp:61
armnn::INetworkPtr m_Network
The network we&#39;re building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615

◆ AddMaximumLayer()

ParsedTfOperationPtr AddMaximumLayer ( const tensorflow::NodeDef &  nodeDef)

Definition at line 3236 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::m_Network, TensorInfo::SetShape(), and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseMaximum().

3237 {
3238  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3239 
3240  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3241  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3242 
3243  auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3244  auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3245 
3246  if (input0NumDims < input1NumDims)
3247  {
3248  const bool isNHWC = true;
3249  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3250  }
3251  if (input1NumDims < input0NumDims)
3252  {
3253  const bool isNHWC = true;
3254  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3255  }
3256 
3257  IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3258 
3259  input0Slot->Connect(layer->GetInputSlot(0));
3260  input1Slot->Connect(layer->GetInputSlot(1));
3261 
3262  TensorInfo outputInfo = input0Slot->GetTensorInfo();
3263  std::vector<unsigned int> outputShape;
3264 
3265  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3266  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3267 
3268  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3269  {
3270  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3271  }
3272 
3273  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3274  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3275 
3276  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3277 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
An output connection slot for a layer.
Definition: INetwork.hpp:38
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ AddMultiplicationLayer()

IConnectableLayer * AddMultiplicationLayer ( const tensorflow::NodeDef &  nodeDef)

Definition at line 3279 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

3280 {
3281  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3282 
3283  IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3284  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3285  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3286 
3287  auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3288  auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3289 
3290  if (input0NumDims < input1NumDims)
3291  {
3292  const bool isNHWC = true;
3293  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3294  }
3295  if (input1NumDims < input0NumDims)
3296  {
3297  const bool isNHWC = true;
3298  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3299  }
3300 
3301  input0Slot->Connect(layer->GetInputSlot(0));
3302  input1Slot->Connect(layer->GetInputSlot(1));
3303 
3304  if (input0NumDims < input1NumDims)
3305  {
3306  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3307  }
3308  else
3309  {
3310  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3311  }
3312  return layer;
3313 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ AddRealDivLayer()

ParsedTfOperationPtr AddRealDivLayer ( const tensorflow::NodeDef &  nodeDef)

Definition at line 3198 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseRealDiv().

3199 {
3200  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3201 
3202  IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3203  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3204  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3205 
3206  auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3207  auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3208 
3209 
3210  if (input0NumDims < input1NumDims)
3211  {
3212  const bool isNHWC = true;
3213  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3214  }
3215  if (input1NumDims < input0NumDims)
3216  {
3217  const bool isNHWC = true;
3218  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3219  }
3220 
3221  input0Slot->Connect(layer->GetInputSlot(0));
3222  input1Slot->Connect(layer->GetInputSlot(1));
3223 
3224  if (input0NumDims < input1NumDims)
3225  {
3226  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3227  }
3228  else
3229  {
3230  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3231 
3232  }
3233  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3234 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ Cleanup()

void Cleanup ( )

Definition at line 3669 of file TfParser.cpp.

References ITfParser::TfParserImpl::m_InputShapes, ITfParser::TfParserImpl::m_NodesByName, ITfParser::TfParserImpl::m_ParsedTfOperations, and ITfParser::TfParserImpl::m_RequestedOutputs.

Referenced by ITfParser::TfParserImpl::CreateNetworkFromGraphDef().

3670 {
3671  // Cleanup, in case we reuse this parser.
3672  m_InputShapes.clear();
3673  m_RequestedOutputs.clear();
3674  m_NodesByName.clear();
3675  m_ParsedTfOperations.clear();
3676 }
std::map< std::string, armnn::TensorShape > m_InputShapes
Definition: TfParser.hpp:261
std::unordered_map< std::string, ParsedTfOperationPtr > m_ParsedTfOperations
Definition: TfParser.hpp:267
std::vector< std::string > m_RequestedOutputs
Definition: TfParser.hpp:262
std::unordered_map< std::string, const tensorflow::NodeDef * > m_NodesByName
Map of nodes extracted from the GraphDef to speed up parsing.
Definition: TfParser.hpp:265

◆ CreateAdditionLayer() [1/4]

IConnectableLayer * CreateAdditionLayer ( const tensorflow::NodeDef &  nodeDef,
armnn::IOutputSlot input0Slot,
armnn::IOutputSlot input1Slot,
const std::string &  layerName 
)

Definition at line 650 of file TfParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::m_Network, TensorInfo::SetShape(), and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::CreateAdditionLayer(), and ITfParser::TfParserImpl::ParseAddN().

655 {
656  const TensorInfo& input0Info = input0Slot->GetTensorInfo();
657  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
658 
659  const unsigned int input0Dim = input0Info.GetNumDimensions();
660  const unsigned int input1Dim = input1Info.GetNumDimensions();
661  if (input0Dim != input1Dim)
662  {
663  // broadcasting where input0 and input1 have different number of dimensions
664  // is only supported for 1D and 4D tensors pair
665  if (input0Dim == 1 && input1Dim == 4)
666  {
667  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
668  }
669  else if (input0Dim == 4 && input1Dim == 1)
670  {
671  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
672  }
673  else
674  {
675  throw ParseException(
676  fmt::format("Unsupported broadcast configuration for {} operation {} {}",
677  layerName,
678  nodeDef.name(),
679  CHECK_LOCATION().AsString()));
680  }
681  }
682  IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
683 
684  input0Slot->Connect(layer->GetInputSlot(0));
685  input1Slot->Connect(layer->GetInputSlot(1));
686 
687  // Ensure the output tensor has the correct dimensions even if a broadcast has been done
688  TensorInfo outputInfo = input0Slot->GetTensorInfo();
689  std::vector<unsigned int> outputShape;
690 
691  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
692  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
693 
694  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
695  {
696  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
697  }
698 
699  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
700  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
701 
702  return layer;
703 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ CreateAdditionLayer() [2/4]

IConnectableLayer * CreateAdditionLayer ( const tensorflow::NodeDef &  nodeDef,
const OutputOfParsedTfOperation opOne,
const OutputOfParsedTfOperation opTwo,
unsigned int  numberOfAddition 
)

Definition at line 724 of file TfParser.cpp.

References ITfParser::TfParserImpl::CreateAdditionLayer(), WithOutputTensorIndex< T >::m_Index, and WithOutputTensorIndex< T >::m_IndexedValue.

729 {
730  IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
731  IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
732  std::string layerName(nodeDef.name());
733  layerName.append("_addN_").append(std::to_string(numberOfAddition));
734  return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
735 }
armnn::IConnectableLayer * CreateAdditionLayer(const tensorflow::NodeDef &nodeDef, armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, const std::string &layerName)
Definition: TfParser.cpp:650
An output connection slot for a layer.
Definition: INetwork.hpp:38

◆ CreateAdditionLayer() [3/4]

IConnectableLayer * CreateAdditionLayer ( const tensorflow::NodeDef &  nodeDef,
armnn::IConnectableLayer layerOne,
armnn::IConnectableLayer layerTwo,
unsigned int  numberOfAddition,
unsigned long  numberOfLayersToConnect,
bool  isOdd 
)

Definition at line 705 of file TfParser.cpp.

References ITfParser::TfParserImpl::CreateAdditionLayer(), and IConnectableLayer::GetOutputSlot().

712 {
713  IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
714  IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
715  std::string layerName(nodeDef.name());
716  if (isOdd || numberOfLayersToConnect != 2)
717  {
718  // we are not connecting the final layer
719  layerName.append("_addN_").append(std::to_string(numberOfAddition));
720  }
721  return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
722 }
armnn::IConnectableLayer * CreateAdditionLayer(const tensorflow::NodeDef &nodeDef, armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, const std::string &layerName)
Definition: TfParser.cpp:650
An output connection slot for a layer.
Definition: INetwork.hpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.

◆ CreateAdditionLayer() [4/4]

IConnectableLayer * CreateAdditionLayer ( const tensorflow::NodeDef &  nodeDef,
const OutputOfParsedTfOperation op,
armnn::IConnectableLayer layer 
)

Definition at line 737 of file TfParser.cpp.

References ITfParser::TfParserImpl::CreateAdditionLayer(), IConnectableLayer::GetOutputSlot(), WithOutputTensorIndex< T >::m_Index, and WithOutputTensorIndex< T >::m_IndexedValue.

741 {
742  IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
743  IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
744  return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
745 }
armnn::IConnectableLayer * CreateAdditionLayer(const tensorflow::NodeDef &nodeDef, armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, const std::string &layerName)
Definition: TfParser.cpp:650
An output connection slot for a layer.
Definition: INetwork.hpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)

Creates the network from a protobuf binary file on the disk.

Definition at line 3605 of file TfParser.cpp.

References CHECK_LOCATION, and ITfParser::TfParserImpl::CreateNetworkFromGraphDef().

3608 {
3609  FILE* fd = fopen(graphFile, "rb");
3610 
3611  if (fd == nullptr)
3612  {
3613  throw FileNotFoundException(
3614  fmt::format("Graph file {} failed to open {}",
3615  graphFile,
3616  CHECK_LOCATION().AsString()));
3617  }
3618 
3619  // Parses the file into a message.
3620  tensorflow::GraphDef graphDef;
3621 
3622  google::protobuf::io::FileInputStream inStream(fileno(fd));
3623  google::protobuf::io::CodedInputStream codedStream(&inStream);
3624  codedStream.SetTotalBytesLimit(INT_MAX);
3625  bool success = graphDef.ParseFromCodedStream(&codedStream);
3626  fclose(fd);
3627 
3628  if (!success)
3629  {
3630  throw ParseException(
3631  fmt::format("Failed to parse protobuf file {} {}",
3632  graphFile,
3633  CHECK_LOCATION().AsString()));
3634  }
3635 
3636  return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3637 }
armnn::INetworkPtr CreateNetworkFromGraphDef(const tensorflow::GraphDef &graphDef, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a GraphDef loaded into memory from one of the other CreateNetwork*.
Definition: TfParser.cpp:3639
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ CreateNetworkFromGraphDef()

INetworkPtr CreateNetworkFromGraphDef ( const tensorflow::GraphDef &  graphDef,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)

Parses a GraphDef loaded into memory from one of the other CreateNetwork*.

Definition at line 3639 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::Cleanup(), ITfParser::TfParserImpl::LoadGraphDef(), ITfParser::TfParserImpl::m_InputShapes, ITfParser::TfParserImpl::m_Network, and ITfParser::TfParserImpl::m_RequestedOutputs.

Referenced by ITfParser::TfParserImpl::CreateNetworkFromBinaryFile(), ITfParser::TfParserImpl::CreateNetworkFromString(), and ITfParser::TfParserImpl::CreateNetworkFromTextFile().

3642 {
3643  m_Network = INetwork::Create();
3644 
3645  m_InputShapes = inputShapes;
3646  if (requestedOutputs.size() == 0)
3647  {
3648  throw ParseException(
3649  fmt::format("requestedOutputs must have at least one entry {}",
3650  CHECK_LOCATION().AsString()));
3651  }
3652  m_RequestedOutputs = requestedOutputs;
3653 
3654  try
3655  {
3656  LoadGraphDef(graphDef);
3657  }
3658  catch (const ParseException& e)
3659  {
3660  Cleanup();
3661  throw e;
3662  }
3663 
3664  Cleanup();
3665 
3666  return std::move(m_Network);
3667 }
std::map< std::string, armnn::TensorShape > m_InputShapes
Definition: TfParser.hpp:261
void LoadGraphDef(const tensorflow::GraphDef &graphDef)
Sets up variables and then performs BFS to parse all nodes.
Definition: TfParser.cpp:3486
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
std::vector< std::string > m_RequestedOutputs
Definition: TfParser.hpp:262
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ CreateNetworkFromString()

INetworkPtr CreateNetworkFromString ( const char *  protoText,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)

Creates the network directly from protobuf text in a string. Useful for debugging/testing.

Definition at line 3587 of file TfParser.cpp.

References CHECK_LOCATION, and ITfParser::TfParserImpl::CreateNetworkFromGraphDef().

3590 {
3591  // Parses the string into a message.
3592  tensorflow::GraphDef graphDef;
3593  bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3594 
3595  if (!success)
3596  {
3597  throw ParseException(
3598  fmt::format("Failed to parse graph file {}",
3599  CHECK_LOCATION().AsString()));
3600  }
3601 
3602  return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3603 }
armnn::INetworkPtr CreateNetworkFromGraphDef(const tensorflow::GraphDef &graphDef, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a GraphDef loaded into memory from one of the other CreateNetwork*.
Definition: TfParser.cpp:3639
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ CreateNetworkFromTextFile()

INetworkPtr CreateNetworkFromTextFile ( const char *  graphFile,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)

Creates the network from a protobuf text file on the disk.

Definition at line 3556 of file TfParser.cpp.

References CHECK_LOCATION, and ITfParser::TfParserImpl::CreateNetworkFromGraphDef().

3559 {
3560  FILE* fd = fopen(graphFile, "r");
3561 
3562  if (fd == nullptr)
3563  {
3564  throw FileNotFoundException(
3565  fmt::format("Graph file {} failed to open {}",
3566  graphFile,
3567  CHECK_LOCATION().AsString()));
3568  }
3569 
3570  // Parses the file into a message.
3571  tensorflow::GraphDef graphDef;
3572  auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3573  bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3574  delete input;
3575  fclose(fd);
3576 
3577  if (!success)
3578  {
3579  throw ParseException(
3580  fmt::format("Failed to parse graph file {}",
3581  CHECK_LOCATION().AsString()));
3582  }
3583 
3584  return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3585 }
armnn::INetworkPtr CreateNetworkFromGraphDef(const tensorflow::GraphDef &graphDef, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a GraphDef loaded into memory from one of the other CreateNetwork*.
Definition: TfParser.cpp:3639
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetBindingInfo()

std::pair< LayerBindingId, TensorInfo > GetBindingInfo ( const std::string &  layerName,
const char *  bindingPointDesc,
const std::unordered_map< std::string, BindingPointInfo > &  nameToBindingInfo 
)
static

Definition at line 3688 of file TfParser.cpp.

References CHECK_LOCATION.

Referenced by ITfParser::TfParserImpl::GetNetworkInputBindingInfo(), and ITfParser::TfParserImpl::GetNetworkOutputBindingInfo().

3691 {
3692  auto it = nameToBindingInfo.find(layerName);
3693  if (it == nameToBindingInfo.end())
3694  {
3696  fmt::format("Unknown {} '{}' {}",
3697  bindingPointDesc,
3698  layerName,
3699  CHECK_LOCATION().AsString()));
3700  }
3701  return it->second;
3702 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetConstInputIndex()

unsigned int GetConstInputIndex ( const std::vector< OutputOfParsedTfOperation > &  inputs)

Definition at line 1198 of file TfParser.cpp.

References CHECK_LOCATION.

Referenced by ITfParser::TfParserImpl::ParseConcat(), ITfParser::TfParserImpl::ParseSplit(), and ITfParser::TfParserImpl::ParseTranspose().

1199 {
1200  for (unsigned int i = 0; i < inputs.size(); i++)
1201  {
1202  if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1203  {
1204  return i;
1205  }
1206  }
1207  throw ParseException(
1208  fmt::format("ArmNN only supports operators with constant axis. {}",
1209  CHECK_LOCATION().AsString()));
1210 
1211 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetInputParsedTfOperationsChecked()

std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked ( const tensorflow::NodeDef &  nodeDef,
std::size_t  expectedNumInputs 
)

Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph, and throws an exception if the number of inputs does not match the expected one.

This will automatically resolve any identity nodes. The result vector contains the parsed operation together with the output tensor index to make the connection unambiguous.

Definition at line 615 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::GetTfInputNodes(), ITfParser::TfParserImpl::m_ParsedTfOperations, and ITfParser::ParsedTfOperation.

Referenced by ITfParser::TfParserImpl::AddActivationLayer(), ITfParser::TfParserImpl::AddAdditionLayer(), ITfParser::TfParserImpl::AddFullyConnectedLayer(), ITfParser::TfParserImpl::AddMaximumLayer(), ITfParser::TfParserImpl::AddMultiplicationLayer(), ITfParser::TfParserImpl::AddRealDivLayer(), ITfParser::TfParserImpl::IsSupportedLeakyReluPattern(), ITfParser::TfParserImpl::ParseAdd(), ITfParser::TfParserImpl::ParseAddN(), ITfParser::TfParserImpl::ParseConcat(), ITfParser::TfParserImpl::ParseConv2D(), ITfParser::TfParserImpl::ParseDepthwiseConv2D(), ITfParser::TfParserImpl::ParseExpandDims(), ITfParser::TfParserImpl::ParseFusedBatchNorm(), ITfParser::TfParserImpl::ParseGather(), ITfParser::TfParserImpl::ParseIdentity(), ITfParser::TfParserImpl::ParseLrn(), ITfParser::TfParserImpl::ParseMaximum(), ITfParser::TfParserImpl::ParseMean(), ITfParser::TfParserImpl::ParsePad(), ITfParser::TfParserImpl::ParsePlaceholder(), ITfParser::TfParserImpl::ParsePooling2d(), ITfParser::TfParserImpl::ParseReshape(), ITfParser::TfParserImpl::ParseResizeBilinear(), ITfParser::TfParserImpl::ParseRsqrt(), ITfParser::TfParserImpl::ParseShape(), ITfParser::TfParserImpl::ParseSoftmax(), ITfParser::TfParserImpl::ParseSplit(), ITfParser::TfParserImpl::ParseSqueeze(), ITfParser::TfParserImpl::ParseStack(), ITfParser::TfParserImpl::ParseStridedSlice(), ITfParser::TfParserImpl::ParseSub(), ITfParser::TfParserImpl::ParseTranspose(), and ITfParser::TfParserImpl::ProcessElementwiseInputSlots().

617 {
618  // Fetches the tensorflow nodes connected as inputs and validate the size.
619  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
620  const std::size_t numInputs = nodes.size();
621  if (numInputs != expectedNumInputs)
622  {
623  throw ParseException(
624  fmt::format("Unexpected number of inputs for node {}. Expected {}, found {} {}",
625  nodeDef.name(),
626  expectedNumInputs,
627  numInputs,
628  CHECK_LOCATION().AsString()));
629  }
630  // Fetches the corresponding ParsedTfOperation operations
631  std::vector<OutputOfParsedTfOperation> result;
632  for (auto&& node : nodes)
633  {
634  auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
635  if (it == m_ParsedTfOperations.end())
636  {
637  throw ParseException(
638  fmt::format("Node with name '{}' has not been parsed {}",
639  node.m_IndexedValue->name(),
640  CHECK_LOCATION().AsString()));
641  }
642  ParsedTfOperation* parsedOp = it->second.get();
643  // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
644  parsedOp = parsedOp->ResolveIdentityOperations();
645  result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
646  }
647  return result;
648 }
WithOutputTensorIndex< ParsedTfOperation * > OutputOfParsedTfOperation
Definition: TfParser.hpp:60
friend class ParsedTfOperation
Definition: ITfParser.hpp:61
std::unordered_map< std::string, ParsedTfOperationPtr > m_ParsedTfOperations
Definition: TfParser.hpp:267
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( const std::string &  name) const

Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.

Definition at line 3678 of file TfParser.cpp.

References ITfParser::TfParserImpl::GetBindingInfo(), and ITfParser::TfParserImpl::m_NetworkInputsBindingInfo.

3679 {
3680  return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3681 }
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
Maps input layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:270
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo(const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
Definition: TfParser.cpp:3688

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( const std::string &  name) const

Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.

Definition at line 3683 of file TfParser.cpp.

References ITfParser::TfParserImpl::GetBindingInfo(), and ITfParser::TfParserImpl::m_NetworkOutputsBindingInfo.

3684 {
3685  return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3686 }
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo(const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
Definition: TfParser.cpp:3688
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
Maps output layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:273

◆ GetTfInputNodes()

std::vector< OutputOfConstNodeDef > GetTfInputNodes ( const tensorflow::NodeDef &  nodeDef) const

Finds the nodes connected as inputs of the given node in the graph.

Definition at line 578 of file TfParser.cpp.

References CHECK_LOCATION, WithOutputTensorIndex< T >::m_Index, WithOutputTensorIndex< T >::m_IndexedValue, and ITfParser::TfParserImpl::m_NodesByName.

Referenced by ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), ITfParser::TfParserImpl::LoadGraphDef(), ITfParser::TfParserImpl::ParseConcat(), ITfParser::TfParserImpl::ParseExpandDims(), ITfParser::TfParserImpl::ParseSplit(), ITfParser::TfParserImpl::ParseStack(), and ITfParser::TfParserImpl::ParseStridedSlice().

579 {
580  std::vector<OutputOfConstNodeDef> ret;
581 
582  if (nodeDef.op() == "Const")
583  {
584  // For some reason const node can have "Control Inputs". We ignore them for now.
585  return ret;
586  }
587 
588  ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
589  for (int j = 0; j < nodeDef.input_size(); ++j)
590  {
591  OutputId outputId = ParseOutputId(nodeDef.input(j));
592 
593  if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
594  {
595  // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
596  continue;
597  }
598 
599  auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
600  if (inputIt == m_NodesByName.end())
601  {
602  throw ParseException(
603  fmt::format("Can't find node '{}', which is listed as an input of '{}' {}",
604  nodeDef.input(j),
605  nodeDef.name(),
606  CHECK_LOCATION().AsString()));
607  }
608  ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
609  }
610 
611  return ret;
612 }
WithOutputTensorIndex< std::string > OutputId
Definition: TfParser.hpp:62
WithOutputTensorIndex< const tensorflow::NodeDef * > OutputOfConstNodeDef
Definition: TfParser.hpp:61
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::unordered_map< std::string, const tensorflow::NodeDef * > m_NodesByName
Map of nodes extracted from the GraphDef to speed up parsing.
Definition: TfParser.hpp:265

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 3740 of file TfParser.cpp.

References TF_PARSER_VERSION.

3741 {
3742  return TF_PARSER_VERSION;
3743 }
#define TF_PARSER_VERSION
TF_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version...
Definition: Version.hpp:25

◆ HasParsedConstTensor() [1/2]

bool HasParsedConstTensor ( const std::string &  nodeName) const

Checks if there is a pre-parsed const tensor available with the given name and Type.

Definition at line 1182 of file TfParser.cpp.

References ITfParser::TfParserImpl::m_ParsedTfOperations.

1183 {
1184  auto it = m_ParsedTfOperations.find(nodeName);
1185  if (it == m_ParsedTfOperations.end())
1186  {
1187  return false;
1188  }
1189  return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1190 }
std::unordered_map< std::string, ParsedTfOperationPtr > m_ParsedTfOperations
Definition: TfParser.hpp:267

◆ HasParsedConstTensor() [2/2]

bool HasParsedConstTensor ( ParsedTfOperation parsedTfOpPtr) const

Definition at line 1193 of file TfParser.cpp.

1194 {
1195  return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
1196 }

◆ IsSupportedLeakyReluPattern()

bool IsSupportedLeakyReluPattern ( const tensorflow::NodeDef &  mulNodeDef,
size_t  alphaLayerIndex,
const OutputOfParsedTfOperation otherOp,
armnn::IOutputSlot **  outputOfLeakyRelu,
armnn::ActivationDescriptor desc 
)

Definition at line 1709 of file TfParser.cpp.

References ARMNN_ASSERT, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), ActivationDescriptor::m_A, ActivationDescriptor::m_Function, WithOutputTensorIndex< T >::m_Index, and WithOutputTensorIndex< T >::m_IndexedValue.

Referenced by ITfParser::TfParserImpl::ParseMaximum().

1714 {
1715  const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1716 
1717  // Verifying all these assumptions hold:
1718  //
1719  // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1720  // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1721  // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1722  //
1723 
1724  if (mulNodeDef.op() == "Mul")
1725  {
1726  size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1727  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1728 
1729  ARMNN_ASSERT(inputs.size() == 2);
1730  ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1731  ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1732  ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1733 
1734  if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1735  {
1736  if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1737  {
1738  ParsedConstTfOperation<float>* alpha =
1739  PolymorphicDowncast<ParsedConstTfOperation<float> *>(
1740  inputs[alphaLayerIndex].m_IndexedValue);
1741 
1742  std::vector<float> const_data;
1743  ConstTensor const_tensor = alpha->GetConstTensor(const_data);
1744 
1745  if (const_data.size() == 1)
1746  {
1747  desc.m_Function = ActivationFunction::LeakyReLu;
1748  desc.m_A = const_data[0];
1749 
1750  *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1751  return true;
1752  }
1753  }
1754  }
1755  }
1756  return false;
1757 }
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ LoadGraphDef()

void LoadGraphDef ( const tensorflow::GraphDef &  graphDef)

Sets up variables and then performs BFS to parse all nodes.

Definition at line 3486 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::GetTfInputNodes(), ITfParser::TfParserImpl::LoadNodeDef(), ITfParser::TfParserImpl::m_InputShapes, ITfParser::TfParserImpl::m_NetworkInputsBindingInfo, ITfParser::TfParserImpl::m_NetworkOutputsBindingInfo, ITfParser::TfParserImpl::m_NodesByName, and ITfParser::TfParserImpl::m_RequestedOutputs.

Referenced by ITfParser::TfParserImpl::CreateNetworkFromGraphDef().

3487 {
3488  // Adds all nodes to our map.
3489  m_NodesByName.clear();
3492 
3493  for (int i = 0; i < graphDef.node_size(); ++i)
3494  {
3495  const tensorflow::NodeDef& node = graphDef.node(i);
3496  m_NodesByName[node.name()] = &node;
3497  }
3498 
3499  // Checks that the input nodes the user has requested exist.
3500  for (const auto& pair : m_InputShapes)
3501  {
3502  const std::string& requestedInputName = pair.first;
3503  auto nodeIt = m_NodesByName.find(requestedInputName);
3504  if (nodeIt == m_NodesByName.end())
3505  {
3506  throw ParseException(
3507  fmt::format("Couldn't find requested input node '{}' in graph {}",
3508  requestedInputName,
3509  CHECK_LOCATION().AsString()));
3510  }
3511  }
3512 
3513  // Finds the output nodes the user requested.
3514  std::vector<const tensorflow::NodeDef*> targetNodes;
3515  for (const std::string& requestedOutputName : m_RequestedOutputs)
3516  {
3517  auto nodeIt = m_NodesByName.find(requestedOutputName);
3518  if (nodeIt == m_NodesByName.end())
3519  {
3520  throw ParseException(
3521  fmt::format("Couldn't find requested output node '{}' in graph {}",
3522  requestedOutputName,
3523  CHECK_LOCATION().AsString()));
3524  }
3525  targetNodes.push_back(nodeIt->second);
3526  }
3527 
3528  // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
3529  std::vector<const tensorflow::NodeDef*> sortedNodes;
3530  if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3531  targetNodes,
3532  [this](const tensorflow::NodeDef* node)
3533  {
3534  auto outputs = GetTfInputNodes(*node);
3535  std::vector<const tensorflow::NodeDef*> nodesOnly;
3536  for (const auto & o : outputs) {
3537  nodesOnly.push_back(o.m_IndexedValue);
3538  }
3539  return nodesOnly;
3540  },
3541  sortedNodes))
3542  {
3543  throw ParseException(
3544  fmt::format("Cycle detected in graph {}",
3545  CHECK_LOCATION().AsString()));
3546  }
3547 
3548  // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
3549  for (const auto& it : sortedNodes)
3550  {
3551  const tensorflow::NodeDef& currentNode = *it;
3552  LoadNodeDef(currentNode, graphDef);
3553  }
3554 }
std::map< std::string, armnn::TensorShape > m_InputShapes
Definition: TfParser.hpp:261
std::vector< std::string > m_RequestedOutputs
Definition: TfParser.hpp:262
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
Maps input layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:270
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578
std::unordered_map< std::string, const tensorflow::NodeDef * > m_NodesByName
Map of nodes extracted from the GraphDef to speed up parsing.
Definition: TfParser.hpp:265
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
Maps output layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:273
void LoadNodeDef(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
Parses a given node, assuming nodes before it in the graph have been done.
Definition: TfParser.cpp:3413

◆ LoadNodeDef()

void LoadNodeDef ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Parses a given node, assuming nodes before it in the graph have been done.

Definition at line 3413 of file TfParser.cpp.

References CHECK_LOCATION, IConnectableLayer::GetInputSlot(), ITfParser::TfParserImpl::m_ControlInputs, ITfParser::TfParserImpl::m_Network, ITfParser::TfParserImpl::m_NetworkOutputsBindingInfo, ITfParser::TfParserImpl::m_ParsedTfOperations, ITfParser::TfParserImpl::m_RequestedOutputs, ITfParser::TfParserImpl::ms_OperationNameToParsingFunctions, armnn::numeric_cast(), ITfParser::ParsedTfOperation, and ITfParser::TfParserImpl::TrackOutputBinding().

Referenced by ITfParser::TfParserImpl::LoadGraphDef().

3414 {
3415  // Gets the type of the node (assume float).
3416  tensorflow::DataType type = tensorflow::DT_FLOAT;
3417  if (nodeDef.attr().count("T") != 0)
3418  {
3419  auto attr = nodeDef.attr().at("T");
3420  type = attr.type();
3421  }
3422  else if (nodeDef.attr().count("dtype") != 0)
3423  {
3424  auto attr = nodeDef.attr().at("dtype");
3425  type = attr.type();
3426  }
3427 
3428  if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
3429  {
3430  throw ParseException(
3431  fmt::format("Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
3432  "Got {} for Node {} {}",
3433  tensorflow::DataType_Name(type),
3434  nodeDef.name(),
3435  CHECK_LOCATION().AsString()));
3436  }
3437 
3438  const std::string& operation = nodeDef.op();
3439  auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3440  if (itControlInput != m_ControlInputs.end())
3441  {
3442  // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
3443  return;
3444  }
3445  auto it = ms_OperationNameToParsingFunctions.find(operation);
3446  if (it != ms_OperationNameToParsingFunctions.end())
3447  {
3448  auto func = it->second;
3449  ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3450  ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3451 
3452  // Stores the parsed operation so that dependent layers can connect to it.
3453  auto it = m_ParsedTfOperations.find(nodeDef.name());
3454  if (it != m_ParsedTfOperations.end())
3455  {
3456  throw ParseException(fmt::format("Name {} used by more than one node", nodeDef.name()));
3457  }
3458  m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3459 
3460  // If this node was requested as an output from the network, then adds an ArmNN output layer.
3461  if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3462  m_RequestedOutputs.end())
3463  {
3464  auto outId = ParseOutputId(nodeDef.name());
3466  IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3467 
3468  TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3469 
3470  IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3471 
3472  prevSlot.Connect(outputLayer->GetInputSlot(0));
3473 
3474  TrackOutputBinding(outputLayer, layerId, tensorInfo);
3475  }
3476  }
3477  else
3478  {
3479  throw ParseException(
3480  fmt::format("Unsupported operation {} in tensorflow::GraphDef {}",
3481  operation,
3482  CHECK_LOCATION().AsString()));
3483  }
3484 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
friend class ParsedTfOperation
Definition: ITfParser.hpp:61
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
std::unordered_map< std::string, ParsedTfOperationPtr > m_ParsedTfOperations
Definition: TfParser.hpp:267
std::vector< std::string > m_RequestedOutputs
Definition: TfParser.hpp:262
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
std::unique_ptr< ParsedTfOperation > ParsedTfOperationPtr
Definition: TfParser.hpp:35
DataType
Definition: Types.hpp:32
An output connection slot for a layer.
Definition: INetwork.hpp:38
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
static const std::list< std::string > m_ControlInputs
Definition: TfParser.hpp:259
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
static const std::map< std::string, OperationParsingFunction > ms_OperationNameToParsingFunctions
Map of TensorFlow operation names to parsing member functions.
Definition: TfParser.hpp:257
void TrackOutputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
Definition: TfParser.cpp:3711
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
Maps output layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:273

◆ operator=()

TfParserImpl& operator= ( const TfParserImpl )
delete

◆ ParseAdd()

ParsedTfOperationPtr ParseAdd ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 826 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddAdditionLayer(), ITfParser::TfParserImpl::AddFullyConnectedLayer(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), and armnn::IgnoreUnused().

828 {
829  IgnoreUnused(graphDef);
830  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
831 
832  // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
833  // together as FullyConnected.
834  if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
835  HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
836  {
837  IConnectableLayer* layer =
838  AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
839  &nodeDef,nodeDef.name().c_str());
840  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
841  }
842  else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
843  inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
844  {
845  IConnectableLayer* layer =
846  AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
847  &nodeDef,nodeDef.name().c_str());
848  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
849  }
850  else
851  {
852  // Otherwise it's just a regular addition.
853  return AddAdditionLayer(nodeDef);
854  }
855 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
armnn::IConnectableLayer * AddFullyConnectedLayer(const tensorflow::NodeDef &matMulNodeDef, const tensorflow::NodeDef *addNodeDef, const char *armnnLayerName)
Definition: TfParser.cpp:3315
void IgnoreUnused(Ts &&...)
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef &nodeDef, bool isBiasAdd=false)
Definition: TfParser.cpp:3118
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615

◆ ParseAddN()

ParsedTfOperationPtr ParseAddN ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 747 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddAdditionLayer(), CHECK_LOCATION, ITfParser::TfParserImpl::CreateAdditionLayer(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), and armnn::IgnoreUnused().

749 {
750  IgnoreUnused(graphDef);
751  uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
752  if (numberOfInputs < 2)
753  {
754  // should never happen
755  throw ParseException(
756  fmt::format("AddN Node with name '{}' has less than two ({}) inputs {}",
757  nodeDef.name(),
758  std::to_string(numberOfInputs),
759  CHECK_LOCATION().AsString()));
760  }
761  else if (numberOfInputs == 2)
762  {
763  //this is the same as a simple Add operation
764  return AddAdditionLayer(nodeDef, false);
765  }
766  else
767  {
768  // build a binary tree of Add layers and return the final Add as the return from the function
769  // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
770  // OutputOfParsedTfOperation, otherwise it will be two layers being added together
771  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
772  unsigned int numberOfAdditions = 0;
773  std::vector<IConnectableLayer*> layers;
774  // NOTE: at this point we will have a minimum of three inputs
775  for (unsigned int i = 0; i < numberOfInputs; ++i)
776  {
777  // every time i is odd we have two inputs to process.
778  bool onSecondItem = i % 2;
779  if (onSecondItem)
780  {
781  ++numberOfAdditions;
783  nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
784  layers.push_back(newLayer);
785  }
786  }
787 
788  std::vector<IConnectableLayer*> layersToConnect(layers);
789  unsigned long numberOfLayersToConnect = layersToConnect.size();
790  bool isOdd = numberOfInputs % 2;
791 
792  while (numberOfLayersToConnect > 1)
793  {
794  layers.clear();
795  for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
796  bool onSecondItem = i % 2;
797  if (onSecondItem) {
798  ++numberOfAdditions;
800  nodeDef,
801  layersToConnect[i - 1],
802  layersToConnect[i],
803  numberOfAdditions,
804  numberOfLayersToConnect,
805  isOdd);
806  layers.push_back(newLayer);
807  }
808  }
809  //OK... need to go again... maybe
810  layersToConnect = layers;
811  numberOfLayersToConnect = layersToConnect.size();
812  }
813  IConnectableLayer* finalLayer = layersToConnect[0];
814  // if we had an odd number of inputs we need to connect the final layer to the
815  // last OutputOfParsedTfOperation in order to create the last Add layer we will
816  // be handing back.
817  if (isOdd)
818  {
819  // connect the final layer to the last op
820  finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
821  }
822  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
823  }
824 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void IgnoreUnused(Ts &&...)
armnn::IConnectableLayer * CreateAdditionLayer(const tensorflow::NodeDef &nodeDef, armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, const std::string &layerName)
Definition: TfParser.cpp:650
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef &nodeDef, bool isBiasAdd=false)
Definition: TfParser.cpp:3118
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615

◆ ParseAvgPool()

ParsedTfOperationPtr ParseAvgPool ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 3003 of file TfParser.cpp.

References ITfParser::TfParserImpl::ParsePooling2d().

3005 {
3006  return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
3007 }
ParsedTfOperationPtr ParsePooling2d(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef, armnn::PoolingAlgorithm pooltype)
Definition: TfParser.cpp:3009

◆ ParseBiasAdd()

ParsedTfOperationPtr ParseBiasAdd ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 857 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddAdditionLayer(), ARMNN_ASSERT, armnn::IgnoreUnused(), ITfParser::ParsedIdentityTfOperation, and ITfParser::ParsedTfOperation.

859 {
860  IgnoreUnused(graphDef);
861  return AddAdditionLayer(nodeDef, true);
862 }
void IgnoreUnused(Ts &&...)
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef &nodeDef, bool isBiasAdd=false)
Definition: TfParser.cpp:3118

◆ ParseConcat()

ParsedTfOperationPtr ParseConcat ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2266 of file TfParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), ITfParser::TfParserImpl::GetConstInputIndex(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::GetTfInputNodes(), OriginsDescriptor::GetViewOrigin(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, OriginsDescriptor::SetConcatAxis(), IOutputSlot::SetTensorInfo(), and OriginsDescriptor::SetViewOriginCoord().

2268 {
2269  IgnoreUnused(graphDef);
2270  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2271 
2272  // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
2273  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2274 
2275  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2276 
2277  // Constant tensor index
2278  unsigned int index = GetConstInputIndex(inputs);
2279  // Get the axis tensor data
2280  ParsedConstTfOperation<int32_t>* shapeNode =
2281  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2282 
2283  std::vector<int32_t> axisTensorData;
2284  shapeNode->GetConstTensor(axisTensorData);
2285 
2286  // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2287  const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
2288 
2289  // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
2290  if (concatDim == 0 || concatDim == 2)
2291  {
2292  throw ParseException(
2293  fmt::format("Dimension {} for concatenation is not supported by Armnn. "
2294  "Node {} {}",
2295  concatDim,
2296  nodeDef.name(),
2297  CHECK_LOCATION().AsString()));
2298  }
2299 
2300  const unsigned int supportedNumDims = 4;
2301  unsigned int numConcatViews = numInputs - 1;
2302  OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
2303  concatDescriptor.SetConcatAxis(concatDim);
2304  TensorShape mergeDims(supportedNumDims);
2305  unsigned int mergeDim = 0;
2306  for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2307  {
2308  // Need to double check whether it should be
2309  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2310  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2311 
2312  // Double check dimensions of the tensors
2313  if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
2314  {
2315  throw armnn::ParseException(
2316  fmt::format("The number of dimensions: {} for input tensors of the "
2317  "concatenation op should be {} {}",
2318  inputTensorInfo.GetNumDimensions(),
2319  supportedNumDims,
2320  CHECK_LOCATION().AsString()));
2321  }
2322 
2323  // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2324  mergeDims = inputTensorInfo.GetShape();
2325  unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
2326  std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
2327 
2328  // Update the view origin coordinates and the merge dimension value
2329  concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2330  mergeDim += mergeDims[concatDim];
2331  }
2332 
2333  // Update the output shape
2334  mergeDims[concatDim] = mergeDim;
2335  armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
2336 
2337  layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
2338 
2339  for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2340  {
2341  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2342  inputSlot.Connect(layer->GetInputSlot(viewIndex));
2343  }
2344 
2345  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2346 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
An OriginsDescriptor for the ConcatLayer.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetConstInputIndex(const std::vector< OutputOfParsedTfOperation > &inputs)
Definition: TfParser.cpp:1198
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseConst()

ParsedTfOperationPtr ParseConst ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1086 of file TfParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, armnnTfParser::ConvertTfTensorDataType(), armnn::GetDataTypeSize(), and armnn::IgnoreUnused().

1088 {
1089  IgnoreUnused(graphDef);
1090  ARMNN_ASSERT(nodeDef.op() == "Const");
1091 
1092  if (nodeDef.attr().count("value") == 0)
1093  {
1094  throw ParseException(
1095  fmt::format("Value not found for Const node - {} {}",
1096  nodeDef.name(),
1097  CHECK_LOCATION().AsString()));
1098  }
1099 
1100  const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
1101  const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1102  const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");
1103 
1104  const auto GetDimensionSize = [](auto& d) { return d.size(); };
1105 
1106  std::vector<unsigned int> dimensionSizes;
1107  std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1108  std::back_inserter(dimensionSizes), GetDimensionSize);
1109 
1110  // Calculates number of elements.
1111  const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
1112  unsigned int numElements = 0U;
1113 
1114  if (!dimensionSizes.empty())
1115  {
1116  numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1117  1U, std::multiplies<unsigned int>());
1118  }
1119 
1120  std::vector<int8_t> tensorData;
1121 
1122  // Get tensor data from the list of values attribute.
1123  if (tfTensor.tensor_content().empty())
1124  {
1125  InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
1126 
1127  // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
1128  // tensor of the provided number of elements.
1129  if (numElements == 0)
1130  {
1131  const unsigned int tfNumElements =
1132  static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
1133  dimensionSizes.push_back(tfNumElements);
1134  }
1135  }
1136  // Gets tensor data from tensor content attribute.
1137  else
1138  {
1139  tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1140 
1141  // Checks if a tensor shape is defined for the tensor content.
1142  if (numElements == 0)
1143  {
1144  throw ParseException(
1145  fmt::format("No tensor shape found for Const node - {} {}",
1146  nodeDef.name(),
1147  CHECK_LOCATION().AsString()));
1148  }
1149  }
1150 
1151  // Const node requires at least a list of values or a content attribute.
1152  if (tensorData.empty())
1153  {
1154  throw ParseException(
1155  fmt::format("No tensor data found for Const node - {} {}",
1156  nodeDef.name(),
1157  CHECK_LOCATION().AsString()));
1158  }
1159 
1160  const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1161  dimensionSizes.data(),
1162  dataType);
1163 
1164  // If we have a list of values, then the length of the list must be
1165  // less than or equal to the number of elements implied by the shape argument.
1166  if (tensorData.size() > tensorInfo.GetNumBytes())
1167  {
1168  throw ParseException(
1169  fmt::format("Number of elements ({}) should be less than or equal "
1170  "to the number of elements implied by the shape argument ({}) for Const node - {} {}",
1171  (tensorData.size() / GetDataTypeSize(dataType)),
1172  tensorInfo.GetNumElements(),
1173  nodeDef.name(),
1174  CHECK_LOCATION().AsString()));
1175  }
1176 
1177  return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1178  dataType, this, nodeDef, tensorData, tensorInfo);
1179 }
void IgnoreUnused(Ts &&...)
DataType
Definition: Types.hpp:32
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
Definition: TfParser.cpp:952
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:126

◆ ParseConv2D()

ParsedTfOperationPtr ParseConv2D ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1213 of file TfParser.cpp.

References armnnTfParser::CalcPadding(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), DataLayoutIndexed::GetHeightIndex(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, ITfParser::TfParserImpl::m_Network, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, armnnUtils::Permute(), armnnUtils::Permuted(), and IOutputSlot::SetTensorInfo().

1215 {
1216  IgnoreUnused(graphDef);
1217  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1218  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1219  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1220 
1221  if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1222  {
1223  throw ParseException(
1224  fmt::format("ArmNN only supports Convolution layers with constant weights for {}, input {} {}",
1225  nodeDef.name(),
1226  inputs[1].m_IndexedValue->GetNode().name(),
1227  CHECK_LOCATION().AsString()));
1228  }
1229  ParsedConstTfOperation<float>* weightNode =
1230  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1231 
1232  std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1233  std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1234  std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1235 
1237  desc.m_BiasEnabled = false;
1238 
1239  CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");
1240 
1241  DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1242 
1243  desc.m_DataLayout = dataLayout;
1244 
1245  DataLayoutIndexed dataLayoutIndexed(dataLayout);
1246 
1247  desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1248  desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
1249 
1250  std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1251  if (!dilations.empty())
1252  {
1253  desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
1254  desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
1255  }
1256 
1257  uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1258  uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1259 
1260  // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
1261  // Tensorflow weights are [H, W, In, Out].
1262  // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
1263  // and [Out, In, H, W] when the data layout is NCHW.
1264  PermutationVector permutationVector =
1265  dataLayout == DataLayout::NHWC ?
1266  std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
1267  std::initializer_list<unsigned int>{ 2, 3, 1, 0 }; // NCHW: [H, W, In, Out] -> [Out, In, H, W]
1268 
1269  // Swizzle the tensor using the given permutation vector.
1270  const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1271  const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1272 
1273  // Swizzles the content of the tensor's permanent storage into a local storage.
1274  std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1275  armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
1276  weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
1277 
1278  // Create a weight tensor with the newly swizzled data.
1279  ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1280 
1281  uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1282  uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1283 
1284  bool padding = false;
1285  TensorInfo outputInfo;
1286  unsigned int outputHeight = 0;
1287  unsigned int outputWidth = 0;
1288 
1289  CHECK_PADDING_TYPE(nodeDef, paddingString);
1290 
1291  if (paddingString == "SAME")
1292  {
1293  padding = true;
1294  }
1295  else if (paddingString == "VALID")
1296  {
1297  padding = false;
1298  }
1299 
1300  CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
1301  CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);
1302 
1303  // Calculate output height and width
1304  unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
1305  unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
1306  outputWidth = 1 + (readWidth / desc.m_StrideX);
1307 
1308  unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
1309  unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
1310  outputHeight = 1 + (readHeight / desc.m_StrideY);
1311 
1312  switch (dataLayout)
1313  {
1314  case DataLayout::NHWC:
1315  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1316  outputHeight,
1317  outputWidth,
1318  weightTensor.GetShape()[0] },
1319  DataType::Float32);
1320  break;
1321  case DataLayout::NCHW:
1322  default:
1323  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1324  weightTensor.GetShape()[0],
1325  outputHeight,
1326  outputWidth },
1327  DataType::Float32);
1328  break;
1329  }
1330 
1331  IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
1332  weightTensor,
1333  EmptyOptional(),
1334  nodeDef.name().c_str());
1335  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1336  inputSlot.Connect(layer->GetInputSlot(0));
1337 
1338  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1339 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
DataLayout
Definition: Types.hpp:50
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
uint32_t m_DilationY
Dilation along y axis.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
uint32_t m_PadTop
Padding top value in the height dimension.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
An output connection slot for a layer.
Definition: INetwork.hpp:38
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t dilation, uint32_t &paddingFront, uint32_t &paddingBack, bool samePadding)
Definition: TfParser.cpp:429
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_DilationX
Dilation along x axis.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
Definition: TfParser.cpp:356
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING)
Definition: TfParser.cpp:368
virtual int Connect(IInputSlot &destination)=0
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ ParseDepthwiseConv2D()

ParsedTfOperationPtr ParseDepthwiseConv2D ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1341 of file TfParser.cpp.

References armnnTfParser::CalcPadding(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), DataLayoutIndexed::GetHeightIndex(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), BaseTensor< MemoryType >::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationY, ITfParser::TfParserImpl::m_Network, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, armnnUtils::Permute(), armnnUtils::Permuted(), and IOutputSlot::SetTensorInfo().

1343 {
1344  IgnoreUnused(graphDef);
1345  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1346  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1347  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1348 
1349  if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1350  {
1351  throw ParseException(
1352  fmt::format("ArmNN only supports Depthwise Convolution layer with constant weights. "
1353  "Non const input found {} for node {} {}",
1354  inputs[1].m_IndexedValue->GetNode().name(),
1355  nodeDef.name(),
1356  CHECK_LOCATION().AsString()));
1357  }
1358 
1359  ParsedConstTfOperation<float>* weightNode =
1360  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1361 
1362  std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1363  std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1364  std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1365 
1367  desc.m_BiasEnabled = false;
1368 
1369  CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
1370 
1371  DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1372 
1373  desc.m_DataLayout = dataLayout;
1374 
1375  DataLayoutIndexed dataLayoutIndexed(dataLayout);
1376 
1377  desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1378  desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
1379  std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1380  if (!dilations.empty())
1381  {
1382  desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
1383  desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
1384  }
1385 
1386  uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1387  uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1388 
1389  // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
1390  // Tensorflow weights come in the format [H, W, I, M].
1391  // ArmNN weights have to be [M, I, H, W].
1392  PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
1393 
1394  // Swizzle the tensor using the given permutation vector.
1395  const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1396  const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1397 
1398  // Swizzles the content of the tensor's permanent storage into a local storage.
1399  std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1400  armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
1401  weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
1402 
1403  // Create a weight tensor with the newly swizzled data.
1404  ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1405 
1406  uint32_t weightHeight = weightTensor.GetShape()[2];
1407  uint32_t weightWidth = weightTensor.GetShape()[3];
1408 
1409  bool padding = false;
1410  TensorInfo outputInfo;
1411  unsigned int outputHeight = 0;
1412  unsigned int outputWidth = 0;
1413 
1414  CHECK_PADDING_TYPE(nodeDef, paddingString);
1415 
1416  if (paddingString == "SAME")
1417  {
1418  padding = true;
1419  }
1420  else if (paddingString == "VALID")
1421  {
1422  padding = false;
1423  }
1424 
1425  CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
1426  CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);
1427 
1428  // Calculate output height and width
1429  unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
1430  unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
1431  outputWidth = 1 + (readWidth / desc.m_StrideX);
1432 
1433  unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
1434  unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
1435  outputHeight = 1 + (readHeight / desc.m_StrideY);
1436 
1437  switch (dataLayout)
1438  {
1439  case DataLayout::NHWC:
1440  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1441  outputHeight,
1442  outputWidth,
1443  weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
1444  DataType::Float32);
1445  break;
1446  case DataLayout::NCHW:
1447  default:
1448  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1449  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1450  outputHeight,
1451  outputWidth },
1452  DataType::Float32);
1453  break;
1454  }
1455 
1456  IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
1457  weightTensor,
1458  EmptyOptional(),
1459  nodeDef.name().c_str());
1460  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1461  inputSlot.Connect(layer->GetInputSlot(0));
1462 
1463  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1464 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
bool m_BiasEnabled
Enable/disable bias.
DataLayout
Definition: Types.hpp:50
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadLeft
Padding left value in the width dimension.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
uint32_t m_DilationY
Dilation factor value for height dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
An output connection slot for a layer.
Definition: INetwork.hpp:38
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t dilation, uint32_t &paddingFront, uint32_t &paddingBack, bool samePadding)
Definition: TfParser.cpp:429
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
Definition: TfParser.cpp:356
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING)
Definition: TfParser.cpp:368
virtual int Connect(IInputSlot &destination)=0
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
unsigned int GetNumElements() const
Definition: Tensor.hpp:192
uint32_t m_PadRight
Padding right value in the width dimension.

◆ ParseEqual()

// Definition at line 1947 of file TfParser.cpp.
//
// Maps a TensorFlow Equal node onto an ArmNN Comparison layer with the
// Equal operation; input wiring and broadcasting are delegated to the
// shared elementwise/comparison helpers.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseEqual(const tensorflow::NodeDef& nodeDef,
                                                         const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);

    auto slots = ProcessElementwiseInputSlots(nodeDef, "Equal");

    const ComparisonDescriptor comparisonDesc(ComparisonOperation::Equal);
    IConnectableLayer* const comparisonLayer =
        m_Network->AddComparisonLayer(comparisonDesc, nodeDef.name().c_str());

    return ProcessComparisonLayer(slots.first, slots.second, comparisonLayer, nodeDef);
}
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
std::pair< armnn::IOutputSlot *, armnn::IOutputSlot * > ProcessElementwiseInputSlots(const tensorflow::NodeDef &nodeDef, const std::string &layerName)
Definition: TfParser.cpp:1807
An output connection slot for a layer.
Definition: INetwork.hpp:38
ParsedTfOperationPtr ProcessComparisonLayer(armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
Definition: TfParser.cpp:1841

◆ ParseExpandDims()

ParsedTfOperationPtr ParseExpandDims ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1535 of file TfParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), TensorInfo::GetDataType(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumElements(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::GetTfInputNodes(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, ReshapeDescriptor::m_TargetShape, armnnTfParser::OutputShapeOfExpandDims(), IOutputSlot::SetTensorInfo(), and armnn::Signed32.

1537 {
1538  IgnoreUnused(graphDef);
1539 
1540  // Number of inputs can either
1541  // be 1 - that indicates that the axis parameter is passed as an attribute of the operation
1542  // or 2 - which means that the axis parameter is passed as a second input
1543  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1544  const std::size_t numInputs = nodes.size();
1545  std::vector<OutputOfParsedTfOperation> inputs;
1546  std::int32_t expandDim; // axis or dim parameter. Describes which dimension to expand.
1547  if (numInputs == 1)
1548  {
1549  inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1550  expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1551  }
1552  else
1553  {
1554  inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1555 
1556  // make sure data type is int32
1557  IOutputSlot& prevLayerOutputSlot = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1558  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1559 
1560  if (inputTensorInfo.GetDataType()!=armnn::DataType::Signed32)
1561  {
1562  throw ParseException(
1563  fmt::format("The axis parameter of ExpandDims operation given as second input is not of type int32."
1564  " Input {0} Node {1} {2}",
1565  inputs[1].m_IndexedValue->GetNode().name(),
1566  nodeDef.name(),
1567  CHECK_LOCATION().AsString()));
1568  }
1569 
1570  // ensure the second input is a constant value
1571  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1572  {
1573  throw ParseException(
1574  fmt::format("ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
1575  "Input {0} Node {1} {2}",
1576  inputs[1].m_IndexedValue->GetNode().name(),
1577  nodeDef.name(),
1578  CHECK_LOCATION().AsString()));
1579  }
1580 
1581  // make sure the second input is scalar or contains only a single value
1582  // (we don't support expand dims for multiple axis but we don't care what shape the
1583  // given tensor has as long as there is only a single value in it
1584  // e.g. a tensor like this [[[1]]] is completely fine)
1585  if (inputTensorInfo.GetNumElements() != 1)
1586  {
1587  throw ParseException(
1588  fmt::format("The axis parameter of ExpandDims operation given as second input is not "
1589  "allowed to hold more than one value. "
1590  "Input {0} Node {1} {2}",
1591  inputs[1].m_IndexedValue->GetNode().name(),
1592  nodeDef.name(),
1593  CHECK_LOCATION().AsString()));
1594  }
1595 
1596  ParsedConstTfOperation<int32_t>* expandDimsNode =
1597  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1598 
1599  memcpy(&expandDim, expandDimsNode->GetStorage(), sizeof(expandDim));
1600  }
1601 
1602  // First input is the vector that should be expanded by another dimension
1603  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1604  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1605 
1606  TensorInfo outputInfo;
1607  outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo, expandDim);
1608 
1609  ReshapeDescriptor reshapeDesc;
1610  reshapeDesc.m_TargetShape = outputInfo.GetShape();
1611  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1612  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1613  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1614 
1615  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1616 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
A ReshapeDescriptor for the ReshapeLayer.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
TensorShape m_TargetShape
Target shape value.
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo, std::int32_t expandDim)
Definition: TfParser.cpp:1466
An output connection slot for a layer.
Definition: INetwork.hpp:38
DataType GetDataType() const
Definition: Tensor.hpp:194
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ ParseFusedBatchNorm()

ParsedTfOperationPtr ParseFusedBatchNorm ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1618 of file TfParser.cpp.

References CHECK_DATA_FORMAT, CHECK_LOCATION, IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), BatchNormalizationDescriptor::m_DataLayout, BatchNormalizationDescriptor::m_Eps, ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

1620 {
1621  IgnoreUnused(graphDef);
1622  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1623 
1624  if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1625  {
1626  throw ParseException(
1627  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant scale. "
1628  "Input {}. Node {} {}",
1629  inputs[1].m_IndexedValue->GetNode().name(),
1630  nodeDef.name(),
1631  CHECK_LOCATION().AsString()));
1632  }
1633  ParsedConstTfOperation<float>* scaleNode =
1634  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1635 
1636  if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1637  {
1638  throw ParseException(
1639  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant offset. "
1640  "Input {}. Node {} {}",
1641  inputs[2].m_IndexedValue->GetNode().name(),
1642  nodeDef.name(),
1643  CHECK_LOCATION().AsString()));
1644  }
1645  ParsedConstTfOperation<float>* offsetNode =
1646  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1647 
1648  if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1649  {
1650  throw ParseException(
1651  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant mean. "
1652  "Input {}. Node {} {}",
1653  inputs[3].m_IndexedValue->GetNode().name(),
1654  nodeDef.name(),
1655  CHECK_LOCATION().AsString()));
1656  }
1657  ParsedConstTfOperation<float>* meanNode =
1658  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1659 
1660  if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1661  {
1662  throw ParseException(
1663  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant variance. "
1664  "Input {}. Node {} {}",
1665  inputs[4].m_IndexedValue->GetNode().name(),
1666  nodeDef.name(),
1667  CHECK_LOCATION().AsString()));
1668  }
1669  ParsedConstTfOperation<float>* varianceNode =
1670  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1671 
1672  const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
1673  CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1674 
1675  // The descriptor only has the epsilon attribute.
1677  desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
1678  desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1679 
1680  // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1681  // locally until the layer is added.
1682  std::vector<float> scaleTensorData;
1683  ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
1684 
1685  std::vector<float> offsetTensorData;
1686  ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
1687 
1688  std::vector<float> meanTensorData;
1689  ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
1690 
1691  std::vector<float> varianceTensorData;
1692  ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
1693 
1694  IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1695  meanTensor,
1696  varianceTensor,
1697  offsetTensor,
1698  scaleTensor,
1699  nodeDef.name().c_str());
1700 
1701  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1702 
1703  layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1704  inputSlot.Connect(layer->GetInputSlot(0));
1705 
1706  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1707 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
Definition: TfParser.cpp:356
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
A BatchNormalizationDescriptor for the BatchNormalizationLayer.

◆ ParseGather()

// Definition at line 1894 of file TfParser.cpp.
//
// Converts a TensorFlow Gather node into an ArmNN Gather layer, inferring
// the output shape as all indices dimensions followed by the params
// dimensions [1..rank).
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGather(const tensorflow::NodeDef& nodeDef,
                                                          const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputOps = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& paramsSlot  = inputOps[0].m_IndexedValue->ResolveArmnnOutputSlot(inputOps[0].m_Index);
    IOutputSlot& indicesSlot = inputOps[1].m_IndexedValue->ResolveArmnnOutputSlot(inputOps[1].m_Index);

    GatherDescriptor gatherDescriptor;
    gatherDescriptor.m_Axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");

    // Infer the output tensor shape.
    const unsigned int paramsRank  = paramsSlot.GetTensorInfo().GetNumDimensions();
    const unsigned int indicesRank = indicesSlot.GetTensorInfo().GetNumDimensions();

    std::vector<unsigned int> outputDims;
    outputDims.reserve(indicesRank + paramsRank - 1);
    for (unsigned int dim = 0; dim < indicesRank; ++dim)
    {
        outputDims.push_back(indicesSlot.GetTensorInfo().GetShape()[dim]);
    }
    for (unsigned int dim = 1; dim < paramsRank; ++dim)
    {
        outputDims.push_back(paramsSlot.GetTensorInfo().GetShape()[dim]);
    }

    const TensorShape inferredShape(paramsRank - 1 + indicesRank, outputDims.data());
    const TensorInfo inferredOutputInfo(inferredShape, paramsSlot.GetTensorInfo().GetDataType());

    IConnectableLayer* const gatherLayer = m_Network->AddGatherLayer(gatherDescriptor, nodeDef.name().c_str());
    gatherLayer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);

    paramsSlot.Connect(gatherLayer->GetInputSlot(0));
    indicesSlot.Connect(gatherLayer->GetInputSlot(1));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, gatherLayer);
}
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
An output connection slot for a layer.
Definition: INetwork.hpp:38
DataType GetDataType() const
Definition: Tensor.hpp:194
A GatherDescriptor for the GatherLayer.
int32_t m_Axis
The axis in params to gather indices from.
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const TensorInfo & GetTensorInfo() const =0
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseGreater()

// Definition at line 1933 of file TfParser.cpp.
//
// Maps a TensorFlow Greater node onto an ArmNN Comparison layer with the
// Greater operation; input wiring and broadcasting are delegated to the
// shared elementwise/comparison helpers.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGreater(const tensorflow::NodeDef& nodeDef,
                                                           const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);

    auto slots = ProcessElementwiseInputSlots(nodeDef, "Greater");

    const ComparisonDescriptor comparisonDesc(ComparisonOperation::Greater);
    IConnectableLayer* const comparisonLayer =
        m_Network->AddComparisonLayer(comparisonDesc, nodeDef.name().c_str());

    return ProcessComparisonLayer(slots.first, slots.second, comparisonLayer, nodeDef);
}
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
std::pair< armnn::IOutputSlot *, armnn::IOutputSlot * > ProcessElementwiseInputSlots(const tensorflow::NodeDef &nodeDef, const std::string &layerName)
Definition: TfParser.cpp:1807
An output connection slot for a layer.
Definition: INetwork.hpp:38
ParsedTfOperationPtr ProcessComparisonLayer(armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
Definition: TfParser.cpp:1841

◆ ParseIdentity()

// Definition at line 891 of file TfParser.cpp.
//
// Handles a TensorFlow Identity node. No ArmNN layer is created: any
// requests for this node's output slots are forwarded to the node
// connected as its input.
ParsedTfOperationPtr ITfParser::TfParserImpl::ParseIdentity(const tensorflow::NodeDef& nodeDef,
                                                            const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    const std::vector<OutputOfParsedTfOperation> inputOps = GetInputParsedTfOperationsChecked(nodeDef, 1);
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputOps[0].m_IndexedValue);
}
void IgnoreUnused(Ts &&...)
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615

◆ ParseLrn()

ParsedTfOperationPtr ParseLrn ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2578 of file TfParser.cpp.

References ARMNN_ASSERT, IOutputSlot::Connect(), ITfParser::DeferredSingleLayerParsedTfOperation, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), NormalizationDescriptor::m_Alpha, NormalizationDescriptor::m_Beta, NormalizationDescriptor::m_DataLayout, NormalizationDescriptor::m_K, m_Layer, ITfParser::TfParserImpl::m_Network, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, armnn::NHWC, ITfParser::ParsedMatMulTfOperation, and IOutputSlot::SetTensorInfo().

2580 {
2581  IgnoreUnused(graphDef);
2582  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2583 
2584  NormalizationDescriptor normalizationDescriptor;
2585  normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2586  normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2587  normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2588  normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2589  normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2590  normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
2591  normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
2592 
2593  // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2594  normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
2595 
2596  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2597  IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2598  nodeDef.name().c_str());
2599  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2600  layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2601 
2602  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2603 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
float m_K
Kappa value used for the across channel normalization equation.
float m_Alpha
Alpha value for the normalization equation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
An output connection slot for a layer.
Definition: INetwork.hpp:38
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.

◆ ParseMatMul()

ParsedTfOperationPtr ParseMatMul ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2625 of file TfParser.cpp.

References armnn::IgnoreUnused().

2627 {
2628  IgnoreUnused(graphDef);
2629 
2630  // Defers the creation of the layer (see ParsedMatMulTfOperation).
2631  return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2632 }
void IgnoreUnused(Ts &&...)

◆ ParseMaximum()

ParsedTfOperationPtr ParseMaximum ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1759 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddMaximumLayer(), ARMNN_ASSERT, CHECK_LOCATION, IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::IsSupportedLeakyReluPattern(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

1761 {
1762  IgnoreUnused(graphDef);
1763  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1764  if (inputs.size() != 2)
1765  {
1766  throw ParseException(
1767  fmt::format("Maximum expects two inputs!. Got {} for Node {} {}",
1768  inputs.size(),
1769  nodeDef.name(),
1770  CHECK_LOCATION().AsString()));
1771  }
1772 
1773  auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1774  auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1775  IOutputSlot* outputOfLeakyRelu = nullptr;
1776 
1777  ActivationDescriptor desc;
1778 
1779  // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1780  // i.e. one of the four possible scenarios:
1781  // 1, max(mul(a, x), x)
1782  // 2, max(mul(x, a), x)
1783  // 3, max(x, mul(a, x))
1784  // 4, max(x, mul(x, a))
1785  // These are handled by an activation layer.
1786 
1787  if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1788  IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1789  IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1790  IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1791  {
1792  ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
1793 
1794  IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1795  outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1796  layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1797  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1798  }
1799  else
1800  {
1801  // Anything else is just a maximum layer.
1802 
1803  return AddMaximumLayer(nodeDef);
1804  }
1805 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
bool IsSupportedLeakyReluPattern(const tensorflow::NodeDef &mulNodeDef, size_t alphaLayerIndex, const OutputOfParsedTfOperation &otherOp, armnn::IOutputSlot **outputOfLeakyRelu, armnn::ActivationDescriptor &desc)
Definition: TfParser.cpp:1709
An output connection slot for a layer.
Definition: INetwork.hpp:38
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
ParsedTfOperationPtr AddMaximumLayer(const tensorflow::NodeDef &nodeDef)
Definition: TfParser.cpp:3236

◆ ParseMaxPool()

ParsedTfOperationPtr ParseMaxPool ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2997 of file TfParser.cpp.

References ITfParser::TfParserImpl::ParsePooling2d().

2999 {
3000  return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
3001 }
ParsedTfOperationPtr ParsePooling2d(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef, armnn::PoolingAlgorithm pooltype)
Definition: TfParser.cpp:3009

◆ ParseMean()

ParsedTfOperationPtr ParseMean ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2634 of file TfParser.cpp.

References ARMNN_ASSERT, armnnUtils::CalculateReducedOutputTensoInfo(), CHECK_LOCATION, IOutputSlot::Connect(), ITfParser::DeferredSingleLayerParsedTfOperation, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, m_Layer, ITfParser::TfParserImpl::m_Network, ITfParser::ParsedMulTfOperation, and IOutputSlot::SetTensorInfo().

2636 {
2637  IgnoreUnused(graphDef);
2638  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2639  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2640  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2641 
2642  if (inputs.size() != 2)
2643  {
2644  throw ParseException(
2645  fmt::format("Mean expects two inputs!. Got {} for Node {} {}",
2646  inputs.size(),
2647  nodeDef.name(),
2648  CHECK_LOCATION().AsString()));
2649  }
2650 
2651  bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2652 
2653  ParsedConstTfOperation<int32_t>* axisNode =
2654  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2655 
2656  const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2657 
2658  ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2659  const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2660 
2661  TensorInfo outputTensorInfo;
2662  MeanDescriptor meanDescriptor;
2663  meanDescriptor.m_KeepDims = keepDims;
2664 
2665  // Negative axis values are supported so that the process requires
2666  // to convert them into the corresponding positive ones.
2667  // Duplicate values are also removed.
2668  std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2669  std::set<unsigned int> positiveAxisSet;
2670  int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2671 
2672  std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2673  std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2674  [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2675 
2676  CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
2677 
2678  if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2679  {
2680  meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2681  }
2682 
2683  IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2684  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2685  inputSlot.Connect(layer->GetInputSlot(0));
2686 
2687  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2688 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void CalculateReducedOutputTensoInfo(const armnn::TensorInfo &inputTensorInfo, const std::set< unsigned int > &axisSet, bool keepDims, armnn::TensorInfo &outputTensorInfo)
Creates a tensor info after reducing the dimensions mentioned in axisData.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseMinimum()

ParsedTfOperationPtr ParseMinimum ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1961 of file TfParser.cpp.

References armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, ITfParser::TfParserImpl::ProcessElementwiseInputSlots(), and ITfParser::TfParserImpl::ProcessElementwiseLayer().

1963 {
1964  IgnoreUnused(graphDef);
1965  std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1966  IOutputSlot* input0Slot = inputLayers.first;
1967  IOutputSlot* input1Slot = inputLayers.second;
1968 
1969  IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1970 
1971  return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1972 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
std::pair< armnn::IOutputSlot *, armnn::IOutputSlot * > ProcessElementwiseInputSlots(const tensorflow::NodeDef &nodeDef, const std::string &layerName)
Definition: TfParser.cpp:1807
An output connection slot for a layer.
Definition: INetwork.hpp:38
ParsedTfOperationPtr ProcessElementwiseLayer(armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
Definition: TfParser.cpp:1868

◆ ParseMul()

ParsedTfOperationPtr ParseMul ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2710 of file TfParser.cpp.

References armnn::IgnoreUnused().

2712 {
2713  IgnoreUnused(graphDef);
2714 
2715  return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
2716 }
void IgnoreUnused(Ts &&...)

◆ ParsePad()

ParsedTfOperationPtr ParsePad ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2195 of file TfParser.cpp.

References armnnTfParser::CalculatePaddedOutputTensorInfo(), CHECK_LOCATION, armnnTfParser::CheckPaddingTensor(), IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

2197 {
2198  IgnoreUnused(graphDef);
2199  // input consists of:
2200  // input[0] the tensor which will be padded
2201  // input[1] the tensor holding the padding values
2202  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2203  IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2204  TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2205  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2206  {
2207  throw ParseException(
2208  fmt::format("ArmNN only supports Pad with constant padding. "
2209  "Input {}. Node {} {}",
2210  inputs[1].m_IndexedValue->GetNode().name(),
2211  nodeDef.name(),
2212  CHECK_LOCATION().AsString()));
2213 
2214  }
2215  ParsedConstTfOperation<int32_t>* paddingTensorOp =
2216  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2217 
2218  std::vector<int32_t> paddingTensorData;
2219  ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
2220  // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2221  // and should match the rank of the input tensor that is being padded.
2222  // For each dimension D of input, paddings[D, 0] indicates how many values to add
2223  // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2224  // many values to add after the contents of tensor in that dimension
2225  // This needs to be translated into a padList for ACL
2226  std::vector<std::pair<unsigned int, unsigned int>> padList;
2227  unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2228  for (unsigned int i = 0; i < rank; ++i)
2229  {
2230  std::pair<unsigned int, unsigned int> paddingForDim;
2231  for (unsigned int j = 0; j < 2; j++)
2232  {
2233  unsigned int index = (i * 2) + j;
2234  int paddingAmount = paddingTensorData[index];
2235  // make sure we can cast to an unsigned value
2236  if (paddingAmount < 0)
2237  {
2238  throw ParseException(
2239  fmt::format("Negative amount {} specified at [{}, {}] of padding tensor on Node {} {}.",
2240  paddingAmount,
2241  i,
2242  j,
2243  nodeDef.name(),
2244  CHECK_LOCATION().AsString()));
2245  }
2246  if (j == 0)
2247  {
2248  paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2249  }
2250  else
2251  {
2252  paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2253  }
2254  }
2255  padList.push_back(paddingForDim);
2256  }
2257  PadDescriptor padDescriptor(padList);
2258  IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2259  previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2260  // Use the padding to calculate the new output tensor shape
2261  TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2262  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2263  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2264 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
unsigned int CheckPaddingTensor(const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
Definition: TfParser.cpp:2147
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A PadDescriptor for the PadLayer.
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
Definition: TfParser.cpp:2176

◆ ParsePlaceholder()

ParsedTfOperationPtr ParsePlaceholder ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2718 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetOutputSlot(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_InputShapes, ITfParser::TfParserImpl::m_Network, ITfParser::TfParserImpl::m_NetworkInputsBindingInfo, armnn::numeric_cast(), IOutputSlot::SetTensorInfo(), and ITfParser::TfParserImpl::TrackInputBinding().

2720 {
2721  IgnoreUnused(graphDef);
2722 
2723  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2724 
2725  const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2726 
2727  auto it = m_InputShapes.find(nodeDef.name());
2728  if (it == m_InputShapes.end())
2729  {
2730  throw ParseException(
2731  fmt::format("Missing input shape for Placeholder '{}' {}",
2732  nodeDef.name(),
2733  CHECK_LOCATION().AsString()));
2734  }
2735  TensorInfo tensorInfo(it->second, DataType::Float32);
2736 
2737  IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2738 
2739  layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2740 
2741  TrackInputBinding(layer, layerId, tensorInfo);
2742 
2743  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2744 }
std::map< std::string, armnn::TensorShape > m_InputShapes
Definition: TfParser.hpp:261
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void TrackInputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
Definition: TfParser.cpp:3704
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
Maps input layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:270
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.

◆ ParsePooling2d()

ParsedTfOperationPtr ParsePooling2d ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef,
armnn::PoolingAlgorithm  pooltype 
)

Definition at line 3009 of file TfParser.cpp.

References armnnTfParser::CalcPadding(), CHECK_DATA_FORMAT, CHECK_LOCATION, CHECK_PADDING_TYPE, IOutputSlot::Connect(), DataLayoutIndexed::GetHeightIndex(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), DataLayoutIndexed::GetWidthIndex(), armnn::IgnoreUnused(), Pooling2dDescriptor::m_DataLayout, ITfParser::TfParserImpl::m_Network, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseAvgPool(), and ITfParser::TfParserImpl::ParseMaxPool().

3011 {
3012  IgnoreUnused(graphDef);
3013 
3014  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
3015  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3016  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
3017 
3018  if (inputs.size() != 1)
3019  {
3020  throw ParseException(
3021  fmt::format("2D Pooling expects one input!. Got {} for Node {} {}",
3022  inputs.size(),
3023  nodeDef.name(),
3024  CHECK_LOCATION().AsString()));
3025  }
3026 
3027  std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
3028  std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
3029  std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
3030  std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
3031 
3032  Pooling2dDescriptor pooling2dDescriptor;
3033  pooling2dDescriptor.m_PoolType = pooltype;
3034  pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
3035  pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
3036 
3037  CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
3038  DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
3039  pooling2dDescriptor.m_DataLayout = dataLayout;
3040  DataLayoutIndexed dataLayoutIndexed(dataLayout);
3041 
3042  pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
3043  pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
3044  pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
3045  pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
3046 
3047  uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
3048  uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
3049 
3050  bool padding = false;
3051  TensorInfo outputInfo;
3052  unsigned int outputHeight = 0;
3053  unsigned int outputWidth = 0;
3054 
3055  CHECK_PADDING_TYPE(nodeDef, paddingString);
3056 
3057  if (paddingString == "SAME")
3058  {
3059  padding = true;
3060 
3061  outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
3062  static_cast<float>(pooling2dDescriptor.m_StrideY)));
3063  outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
3064  static_cast<float>(pooling2dDescriptor.m_StrideX)));
3065  }
3066  else if (paddingString == "VALID")
3067  {
3068  padding = false;
3069 
3070  outputHeight = static_cast<uint32_t>(ceil(
3071  static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
3072  static_cast<float>(pooling2dDescriptor.m_StrideY)));
3073  outputWidth = static_cast<uint32_t>(ceil(
3074  static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
3075  static_cast<float>(pooling2dDescriptor.m_StrideX)));
3076  }
3077 
3078  switch (dataLayout)
3079  {
3080  case DataLayout::NHWC:
3081  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3082  outputHeight,
3083  outputWidth,
3084  inputTensorInfo.GetShape()[3] },
3085  DataType::Float32);
3086  break;
3087  case DataLayout::NCHW:
3088  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3089  inputTensorInfo.GetShape()[1],
3090  outputHeight,
3091  outputWidth },
3092  DataType::Float32);
3093  break;
3094  }
3095 
3096  CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX, 1u,
3097  pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
3098  CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY, 1u,
3099  pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
3100 
3101 
3102  IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
3103  if (layer == nullptr)
3104  {
3105  throw ParseException(
3106  fmt::format("Failed to add pooling2d layer for {} {}",
3107  nodeDef.name(),
3108  CHECK_LOCATION().AsString()));
3109  }
3110 
3111  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3112 
3113  inputSlot.Connect(layer->GetInputSlot(0));
3114 
3115  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3116 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout
Definition: Types.hpp:50
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_PoolWidth
Pooling width value.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadRight
Padding right value in the width dimension.
An output connection slot for a layer.
Definition: INetwork.hpp:38
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t dilation, uint32_t &paddingFront, uint32_t &paddingBack, bool samePadding)
Definition: TfParser.cpp:429
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
Definition: TfParser.cpp:356
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING)
Definition: TfParser.cpp:368
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ ParseRealDiv()

ParsedTfOperationPtr ParseRealDiv ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2746 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddRealDivLayer(), and armnn::IgnoreUnused().

2748 {
2749  IgnoreUnused(graphDef);
2750  return AddRealDivLayer(nodeDef);
2751 }
void IgnoreUnused(Ts &&...)
ParsedTfOperationPtr AddRealDivLayer(const tensorflow::NodeDef &nodeDef)
Definition: TfParser.cpp:3198

◆ ParseRelu()

ParsedTfOperationPtr ParseRelu ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2753 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddActivationLayer(), armnn::IgnoreUnused(), and ActivationDescriptor::m_Function.

2755 {
2756  IgnoreUnused(graphDef);
2757 
2758  ActivationDescriptor activationDesc;
2759  activationDesc.m_Function = ActivationFunction::ReLu;
2760  return AddActivationLayer(nodeDef, activationDesc);
2761 }
void IgnoreUnused(Ts &&...)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
Definition: TfParser.cpp:2984
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ParseRelu6()

ParsedTfOperationPtr ParseRelu6 ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2763 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddActivationLayer(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, and ActivationDescriptor::m_Function.

2765 {
2766  IgnoreUnused(graphDef);
2767 
2768  ActivationDescriptor activationDesc;
2769  activationDesc.m_Function = ActivationFunction::BoundedReLu;
2770  activationDesc.m_A = 6.0f;
2771  activationDesc.m_B = 0.0f;
2772 
2773  return AddActivationLayer(nodeDef, activationDesc);
2774 }
void IgnoreUnused(Ts &&...)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
Definition: TfParser.cpp:2984
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ParseReshape()

ParsedTfOperationPtr ParseReshape ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2389 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, ReshapeDescriptor::m_TargetShape, ITfParser::ParsedTfOperation, and IOutputSlot::SetTensorInfo().

2391 {
2392  IgnoreUnused(graphDef);
2393  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2394  ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2395 
2396  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2397  {
2398  throw ParseException(
2399  fmt::format("ArmNN only supports Reshape layers with constant shapes. "
2400  "Input {} Node {} {}",
2401  inputs[1].m_IndexedValue->GetNode().name(),
2402  nodeDef.name(),
2403  CHECK_LOCATION().AsString()));
2404  }
2405  ParsedConstTfOperation<int32_t>* shapeNode =
2406  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2407 
2408  armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2409  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2410 
2411  std::vector<int32_t> shapeTensorData;
2412  ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
2413  const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2414 
2415  TensorShape targetShape = outputTensorInfo.GetShape();
2416  ReshapeDescriptor reshapeDesc;
2417  reshapeDesc.m_TargetShape = targetShape;
2418 
2419  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2420  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2421  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2422 
2423  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2424 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
A ReshapeDescriptor for the ReshapeLayer.
friend class ParsedTfOperation
Definition: ITfParser.hpp:61
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
TensorShape m_TargetShape
Target shape value.
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.

◆ ParseResizeBilinear()

ParsedTfOperationPtr ParseResizeBilinear ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2426 of file TfParser.cpp.

References armnn::Bilinear, CHECK_LOCATION, IOutputSlot::Connect(), armnn::Float32, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ITfParser::TfParserImpl::m_Network, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, armnn::NHWC, and IOutputSlot::SetTensorInfo().

2428 {
2429  IgnoreUnused(graphDef);
2430  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2431 
2432  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2433  {
2434  throw ParseException(
2435  fmt::format("ArmNN only supports ResizeBilinear layers with constant sizes. "
2436  "Input {}. Node {} {}",
2437  inputs[1].m_IndexedValue->GetNode().name(),
2438  nodeDef.name(),
2439  CHECK_LOCATION().AsString()));
2440  }
2441  ParsedConstTfOperation<int32_t>* sizeNode =
2442  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2443 
2444  // Checks the align_corners attribute is not set.
2445  if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2446  {
2447  throw ParseException(
2448  fmt::format("ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2449  "Node {} {}",
2450  nodeDef.name(),
2451  CHECK_LOCATION().AsString()));
2452  }
2453 
2454  // Data for the parsed tensor args (size) must be stored locally.
2455  std::vector<int32_t> sizeTensorData;
2456  ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
2457 
2458  // The descriptor only has target height and width attributes, which we get from the size tensor.
2459  ResizeDescriptor desc;
2461  desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
2462  desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2464 
2465  IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
2466 
2467  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2468  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2469  // The input shape is always in BHWC format, this will be swizzled below; for now,
2470  // get the batch and channels to make up the ArmNN output shape with the target size.
2471  unsigned int outBatch = inputTensorInfo.GetShape()[0];
2472  unsigned int outChannels = inputTensorInfo.GetShape()[3];
2473  unsigned int outHeight = desc.m_TargetHeight;
2474  unsigned int outWidth = desc.m_TargetWidth;
2475  TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
2476  // The output DataType is always Float32, regardless of the input DataType.
2477  const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2478  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2479 
2480  inputSlot.Connect(layer->GetInputSlot(0));
2481 
2482  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2483 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A ResizeDescriptor for the ResizeLayer.
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
uint32_t m_TargetWidth
Target width value.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
uint32_t m_TargetHeight
Target height value.
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).

◆ ParseRsqrt()

ParsedTfOperationPtr ParseRsqrt ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2787 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

2789 {
2790  IgnoreUnused(graphDef);
2791 
2792  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2793 
2794  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2795  IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
2796 
2797  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2798  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2799  layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2800 
2801  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2802 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0

◆ ParseShape()

ParsedTfOperationPtr ParseShape ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2348 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), and armnn::IgnoreUnused().

2350 {
2351  IgnoreUnused(graphDef);
2352  // Note: the Shape layer is handled in a special way, because:
2353  // 1. ARMNN doesn't support int32 tensors which it outputs.
2354  // 2. ARMNN works with statically shaped tensors which are known at parse time.
2355  // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
2356  // tensor which may be used as an input to other ops, most likely a Reshape.
2357 
2358  const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2359  if (tfDataType != tensorflow::DT_INT32)
2360  {
2361  throw ParseException(
2362  fmt::format("Armnn only supports DT_INT32 as out_type. Got {} for Node {} {}",
2363  tensorflow::DataType_Name(tfDataType),
2364  nodeDef.name(),
2365  CHECK_LOCATION().AsString()));
2366  }
2367 
2368  const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2369  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2370  const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2371  unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2372 
2373  std::vector<int32_t> shapeTensorData;
2374  shapeTensorData.reserve(prevLayerDimensions);
2375 
2376  for (unsigned int i=0; i<prevLayerDimensions; ++i)
2377  {
2378  shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2379  }
2380 
2381  TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2382 
2383  return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2384  nodeDef,
2385  &shapeTensorData[0],
2386  shapeTensorInfo);
2387 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void IgnoreUnused(Ts &&...)
DataType
Definition: Types.hpp:32
An output connection slot for a layer.
Definition: INetwork.hpp:38
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const TensorInfo & GetTensorInfo() const =0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseSigmoid()

ParsedTfOperationPtr ParseSigmoid ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2776 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddActivationLayer(), armnn::IgnoreUnused(), and ActivationDescriptor::m_Function.

2778 {
2779  IgnoreUnused(graphDef);
2780 
2781  ActivationDescriptor activationDesc;
2782  activationDesc.m_Function = ActivationFunction::Sigmoid;
2783 
2784  return AddActivationLayer(nodeDef, activationDesc);
2785 }
void IgnoreUnused(Ts &&...)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
Definition: TfParser.cpp:2984
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ParseSoftmax()

ParsedTfOperationPtr ParseSoftmax ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2804 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

2806 {
2807  IgnoreUnused(graphDef);
2808 
2809  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2810 
2811  SoftmaxDescriptor softmaxDescriptor;
2812  IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2813 
2814  IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2815  prevLayerSlot.Connect(layer->GetInputSlot(0));
2816  layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2817 
2818  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2819 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
A SoftmaxDescriptor for the SoftmaxLayer.

◆ ParseSoftplus()

ParsedTfOperationPtr ParseSoftplus ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2912 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddActivationLayer(), armnn::IgnoreUnused(), and ActivationDescriptor::m_Function.

2914 {
2915  IgnoreUnused(graphDef);
2916 
2917  ActivationDescriptor activationDesc;
2918  activationDesc.m_Function = ActivationFunction::SoftReLu;
2919 
2920  return AddActivationLayer(nodeDef, activationDesc);
2921 }
void IgnoreUnused(Ts &&...)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
Definition: TfParser.cpp:2984
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ParseSplit()

ParsedTfOperationPtr ParseSplit ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2821 of file TfParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), ITfParser::TfParserImpl::GetConstInputIndex(), TensorInfo::GetDataType(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::GetTfInputNodes(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), and ViewsDescriptor::SetViewSize().

2823 {
2824  IgnoreUnused(graphDef);
2825 
2826  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2827  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2828  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2829 
2830  // Constant tensor index
2831  unsigned int index = GetConstInputIndex(inputs);
2832  // Get the axis tensor data
2833  ParsedConstTfOperation<int32_t>* shapeNode =
2834  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2835 
2836  std::vector<int32_t> axisTensorData;
2837  shapeNode->GetConstTensor(axisTensorData);
2838 
2839  // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
2840  const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2841 
2842  // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2843  if (splitDim == 0 || splitDim == 2)
2844  {
2845  throw armnn::ParseException(
2846  fmt::format("Dimension {} for split is not supported by Armnn. "
2847  "Node {} {}",
2848  splitDim,
2849  nodeDef.name(),
2850  CHECK_LOCATION().AsString()));
2851  }
2852 
2853  // As Armnn only supports splitter outputs of the same shape, therefore num_split will be limited to an integer.
2854  uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
2855 
2856  IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
2857  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2858 
2859  const unsigned int supportedNumDims = 4;
2860  auto inputDimSize = inputTensorInfo.GetNumDimensions();
2861 
2862  if (inputDimSize != supportedNumDims)
2863  {
2864  throw armnn::ParseException(
2865  fmt::format("The number of dimensions: {} for input tensors of the "
2866  "split op should be {} {}",
2867  inputTensorInfo.GetNumDimensions(),
2868  supportedNumDims,
2869  CHECK_LOCATION().AsString()));
2870  }
2871 
2872  std::vector<unsigned int> splitterDimSizes(inputDimSize);
2873 
2874  // Add current input shape to splitterDimSizes
2875  for (unsigned int i = 0; i < inputDimSize; ++i)
2876  {
2877  splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2878  }
2879 
2880  if (splitterDimSizes[splitDim] % num_split != 0)
2881  {
2882  throw ParseException("Number of splits must evenly divide the dimension");
2883  }
2884  splitterDimSizes[splitDim] /= num_split;
2885 
2886  SplitterDescriptor splitDesc(num_split);
2887  for (unsigned int g = 0; g < num_split; ++g)
2888  {
2889  // Set the size of the views.
2890  for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2891  {
2892  splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2893  }
2894  splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2895  }
2896 
2897  IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2898 
2899  inputSlot.Connect(layer->GetInputSlot(0));
2900 
2901  TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2902  splitterDimSizes.data());
2903 
2904  for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2905  {
2906  layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2907  }
2908 
2909  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2910 }
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
DataType GetDataType() const
Definition: Tensor.hpp:194
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetConstInputIndex(const std::vector< OutputOfParsedTfOperation > &inputs)
Definition: TfParser.cpp:1198
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseSqueeze()

ParsedTfOperationPtr ParseSqueeze ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2557 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, ReshapeDescriptor::m_TargetShape, armnnTfParser::OutputShapeOfSqueeze(), and IOutputSlot::SetTensorInfo().

2559 {
2560  IgnoreUnused(graphDef);
2561  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2562 
2563  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2564  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2565 
2566  TensorInfo outputInfo;
2567  outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2568 
2569  ReshapeDescriptor reshapeDesc;
2570  reshapeDesc.m_TargetShape = outputInfo.GetShape();
2571  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2572  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2573  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2574 
2575  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2576 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
A ReshapeDescriptor for the ReshapeLayer.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
TensorShape m_TargetShape
Target shape value.
An output connection slot for a layer.
Definition: INetwork.hpp:38
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
Definition: TfParser.cpp:2485

◆ ParseStack()

ParsedTfOperationPtr ParseStack ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2015 of file TfParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), TensorInfo::GetDataType(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::GetTfInputNodes(), armnn::IgnoreUnused(), StackDescriptor::m_Axis, StackDescriptor::m_InputShape, ITfParser::TfParserImpl::m_Network, StackDescriptor::m_NumInputs, and IOutputSlot::SetTensorInfo().

2017 {
2018  IgnoreUnused(graphDef);
2019  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2020 
2021  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2022  if (numInputs < 1)
2023  {
2024  throw ParseException(
2025  fmt::format("Pack/Stack expects at least one input. Got {} for Node {} {}",
2026  numInputs,
2027  nodeDef.name(),
2028  CHECK_LOCATION().AsString()));
2029  }
2030 
2031  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2032  // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2033  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2034  const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
2035  auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
2036 
2037  // validate axis
2038  int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
2039  const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
2040  if (!(axis < sNumDimensions && axis >= -sNumDimensions))
2041  {
2042  throw ParseException(
2043  fmt::format("Axis index is not in range. Got {} for Node {} {}",
2044  axis,
2045  nodeDef.name(),
2046  CHECK_LOCATION().AsString()));
2047  }
2048 
2049  if (axis < 0)
2050  {
2051  axis = static_cast<int32_t>(numDimensions) + axis + 1;
2052  }
2053 
2054  StackDescriptor stackDescriptor;
2055  stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
2056  stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
2057  stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
2058 
2059  const unsigned int supportedNumDims = 4;
2060  for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2061  {
2062  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2063  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2064 
2065  // Double check dimensions of the tensors
2066  if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
2067  {
2068  throw armnn::ParseException(
2069  fmt::format("The number of dimensions: {} for input tensors of the "
2070  "Pack/Stack op. Number of dimensions should be less than {} {}",
2071  inputTensorInfo.GetNumDimensions(),
2072  supportedNumDims,
2073  CHECK_LOCATION().AsString()));
2074  }
2075  }
2076 
2077  std::vector<unsigned int> outputDimensions;
2078  for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
2079  {
2080  outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
2081  }
2082  outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
2083 
2084  // add Stack Layer
2085  IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
2086 
2087  for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2088  {
2089  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2090  inputSlot.Connect(layer->GetInputSlot(viewIndex));
2091  }
2092 
2093  layer->GetOutputSlot(0).SetTensorInfo(
2094  armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
2095  outputDimensions.data(),
2096  inputTensorInfo.GetDataType()));
2097 
2098  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2099 }
uint32_t m_Axis
0-based axis along which to stack the input tensors.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
TensorShape m_InputShape
Required shape of all input tensors.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A StackDescriptor for the StackLayer.
An output connection slot for a layer.
Definition: INetwork.hpp:38
DataType GetDataType() const
Definition: Tensor.hpp:194
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
uint32_t m_NumInputs
Number of input tensors.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseStridedSlice()

ParsedTfOperationPtr ParseStridedSlice ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2923 of file TfParser.cpp.

References armnnUtils::CalculateStridedSliceOutputTensorInfo(), IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ITfParser::TfParserImpl::GetTfInputNodes(), armnn::IgnoreUnused(), StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ITfParser::TfParserImpl::m_Network, StridedSliceDescriptor::m_NewAxisMask, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, armnn::NHWC, and IOutputSlot::SetTensorInfo().

2925 {
2926  IgnoreUnused(graphDef);
2927 
2928  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2929  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2930  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2931 
2932  ParsedConstTfOperation<int32_t>* beginNode =
2933  PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
2934  std::vector<int32_t> beginTensorData;
2935  beginNode->GetConstTensor(beginTensorData);
2936 
2937  ParsedConstTfOperation<int32_t>* endNode =
2938  PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
2939  std::vector<int32_t> endTensorData;
2940  endNode->GetConstTensor(endTensorData);
2941 
2942  ParsedConstTfOperation<int32_t>* stridesNode =
2943  PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
2944  std::vector<int32_t> stridesTensorData;
2945  stridesNode->GetConstTensor(stridesTensorData);
2946 
2947  StridedSliceDescriptor desc;
2948  desc.m_Begin = beginTensorData;
2949  desc.m_End = endTensorData;
2950  desc.m_Stride = stridesTensorData;
2951  desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
2952  desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
2953  desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
2954  desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
2955  desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
2956 
2957  IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());
2958 
2959  IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2960  TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();
2961 
2962  TensorInfo outputTensorInfo;
2963  CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);
2964 
2965  prevLayerSlot.Connect(layer->GetInputSlot(0));
2966  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2967 
2968  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2969 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
std::vector< int > m_Begin
Begin values for the input that will be sliced.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
int32_t m_BeginMask
Begin mask value.
int32_t m_EndMask
End mask value.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
int32_t m_NewAxisMask
New axis mask value.
An output connection slot for a layer.
Definition: INetwork.hpp:38
int32_t m_EllipsisMask
Ellipsis mask value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
void CalculateStridedSliceOutputTensorInfo(const armnn::TensorInfo &inputTensorInfo, const armnn::StridedSliceDescriptor &desc, armnn::TensorInfo &outputTensorInfo)
Create output tensor info for a StridedSlice operator.
std::vector< int > m_End
End values for the input that will be sliced.
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
Definition: TfParser.cpp:578
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0

◆ ParseSub()

ParsedTfOperationPtr ParseSub ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 1974 of file TfParser.cpp.

References IOutputSlot::Connect(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, and IOutputSlot::SetTensorInfo().

1976 {
1977  IgnoreUnused(graphDef);
1978  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1979 
1980  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1981  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1982 
1983  const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1984  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1985 
1986  if (input0Info.GetNumDimensions() == 1)
1987  {
1988  const bool isNHWC = true;
1989  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1990  }
1991 
1992  if (input1Info.GetNumDimensions() == 1)
1993  {
1994  const bool isNHWC = true;
1995  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1996  }
1997 
1998  IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1999 
2000  input0Slot->Connect(layer->GetInputSlot(0));
2001  input1Slot->Connect(layer->GetInputSlot(1));
2002 
2003  if (input0Info.GetNumDimensions() == 1)
2004  {
2005  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2006  }
2007  else
2008  {
2009  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2010  }
2011 
2012  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2013 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
An output connection slot for a layer.
Definition: INetwork.hpp:38
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseTanh()

ParsedTfOperationPtr ParseTanh ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2971 of file TfParser.cpp.

References ITfParser::TfParserImpl::AddActivationLayer(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, and ActivationDescriptor::m_Function.

2973 {
2974  IgnoreUnused(graphDef);
2975 
2976  ActivationDescriptor activationDesc;
2977  activationDesc.m_Function = ActivationFunction::TanH;
2978  activationDesc.m_A = 1.0f;
2979  activationDesc.m_B = 1.0f;
2980 
2981  return AddActivationLayer(nodeDef, activationDesc);
2982 }
void IgnoreUnused(Ts &&...)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
Definition: TfParser.cpp:2984
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ParseTranspose()

ParsedTfOperationPtr ParseTranspose ( const tensorflow::NodeDef &  nodeDef,
const tensorflow::GraphDef &  graphDef 
)

Definition at line 2101 of file TfParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, ITfParser::TfParserImpl::GetConstInputIndex(), ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), armnn::IgnoreUnused(), ITfParser::TfParserImpl::m_Network, TensorInfo::SetShape(), and armnnUtils::TransposeTensorShape().

2103 {
2104  IgnoreUnused(graphDef);
2105 
2106  auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2107  const auto inputCount = inputs.size();
2108 
2109  if (inputCount != 2)
2110  {
2111  throw ParseException(
2112  fmt::format("The number of given input is {}. It should be two for Transpose op."
2113  "Node {} {}",
2114  inputCount,
2115  nodeDef.name(),
2116  CHECK_LOCATION().AsString()));
2117  }
2118 
2119  auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2120 
2121  const auto constInput = inputs[GetConstInputIndex(inputs)];
2122  auto* permuteVectorInput =
2123  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
2124  const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo();
2125 
2126  std::vector<int32_t> permuteVectorData;
2127  permuteVectorInput->GetConstTensor(permuteVectorData);
2128 
2129  std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
2130 
2131  const auto permutationVector = PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
2132  const auto desc = TransposeDescriptor(permutationVector);
2133 
2134  auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
2135  ARMNN_ASSERT(layer);
2136 
2137  input0Slot->Connect(layer->GetInputSlot(0));
2138 
2139  const auto& input0Info = input0Slot->GetTensorInfo();
2140  armnn::TensorInfo outputInfo {input0Info};
2141  outputInfo.SetShape(armnnUtils::TransposeTensorShape(input0Info.GetShape(), desc.m_DimMappings));
2142  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2143 
2144  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2145 }
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetConstInputIndex(const std::vector< OutputOfParsedTfOperation > &inputs)
Definition: TfParser.cpp:1198
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
A TransposeDescriptor for the TransposeLayer.
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Transpose.cpp:98

◆ ProcessComparisonLayer()

ParsedTfOperationPtr ProcessComparisonLayer ( armnn::IOutputSlot *  input0Slot,
armnn::IOutputSlot *  input1Slot,
armnn::IConnectableLayer *const  layer,
const tensorflow::NodeDef &  nodeDef 
)

Definition at line 1841 of file TfParser.cpp.

References IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), TensorInfo::SetDataType(), TensorInfo::SetShape(), and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseEqual(), and ITfParser::TfParserImpl::ParseGreater().

1846 {
1847  input0Slot->Connect(layer->GetInputSlot(0));
1848  input1Slot->Connect(layer->GetInputSlot(1));
1849 
1850  TensorInfo outputInfo = input0Slot->GetTensorInfo();
1851  outputInfo.SetDataType(DataType::Boolean);
1852  std::vector<unsigned int> outputShape;
1853 
1854  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1855  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1856 
1857  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1858  {
1859  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1860  }
1861 
1862  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1863  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1864 
1865  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1866 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
void SetDataType(DataType type)
Definition: Tensor.hpp:195
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0

◆ ProcessElementwiseInputSlots()

std::pair< armnn::IOutputSlot *, armnn::IOutputSlot * > ProcessElementwiseInputSlots ( const tensorflow::NodeDef &  nodeDef,
const std::string &  layerName 
)

Definition at line 1807 of file TfParser.cpp.

References CHECK_LOCATION, ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(), TensorInfo::GetNumDimensions(), IOutputSlot::GetTensorInfo(), and ITfParser::TfParserImpl::m_Network.

Referenced by ITfParser::TfParserImpl::ParseEqual(), ITfParser::TfParserImpl::ParseGreater(), and ITfParser::TfParserImpl::ParseMinimum().

1809 {
1810  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1811 
1812  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1813  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1814  const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1815  const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1816 
1817  if (input0Dim != input1Dim)
1818  {
1819  // broadcasting where input0 and input1 have different number of dimensions
1820  // is only supported for 1D and 4D tensors pair
1821  if (input0Dim == 1 && input1Dim == 4)
1822  {
1823  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1824  }
1825  else if (input0Dim == 4 && input1Dim == 1)
1826  {
1827  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1828  }
1829  else
1830  {
1831  throw ParseException(
1832  fmt::format("Unsupported broadcast configuration for {} operation {} {}",
1833  layerName,
1834  nodeDef.name(),
1835  CHECK_LOCATION().AsString()));
1836  }
1837  }
1838  return {input0Slot, input1Slot};
1839 }
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
Definition: TfParser.hpp:251
An output connection slot for a layer.
Definition: INetwork.hpp:38
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
Definition: TfParser.cpp:615
virtual const TensorInfo & GetTensorInfo() const =0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ProcessElementwiseLayer()

ParsedTfOperationPtr ProcessElementwiseLayer ( armnn::IOutputSlot *  input0Slot,
armnn::IOutputSlot *  input1Slot,
armnn::IConnectableLayer *const  layer,
const tensorflow::NodeDef &  nodeDef 
)

Definition at line 1868 of file TfParser.cpp.

References IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), TensorShape::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), IOutputSlot::GetTensorInfo(), TensorInfo::SetShape(), and IOutputSlot::SetTensorInfo().

Referenced by ITfParser::TfParserImpl::ParseMinimum().

1873 {
1874  input0Slot->Connect(layer->GetInputSlot(0));
1875  input1Slot->Connect(layer->GetInputSlot(1));
1876 
1877  TensorInfo outputInfo = input0Slot->GetTensorInfo();
1878  std::vector<unsigned int> outputShape;
1879 
1880  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1881  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1882 
1883  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1884  {
1885  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1886  }
1887 
1888  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1889  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1890 
1891  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1892 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0

◆ ResolveIdentityNode()

const tensorflow::NodeDef * ResolveIdentityNode ( const tensorflow::NodeDef *  nodeDef)

Handling identity layers as the input for Conv2D layer.

Definition at line 546 of file TfParser.cpp.

References CHECK_LOCATION, and ITfParser::TfParserImpl::m_NodesByName.

547 {
548  if (nodeDef->op() != "Identity")
549  {
550  return nodeDef;
551  }
552 
553  if (nodeDef->input_size() != 1)
554  {
555  throw ParseException(
556  fmt::format("Identity node should have a single input! {} has {} inputs {}",
557  nodeDef->name(),
558  nodeDef->input_size(),
559  CHECK_LOCATION().AsString()));
560  }
561 
562  auto it = m_NodesByName.find(nodeDef->input(0));
563  if (it != m_NodesByName.end())
564  {
565  const tensorflow::NodeDef* inputNode = it->second;
566  return ResolveIdentityNode(inputNode);
567  }
568  else
569  {
570  throw ParseException(
571  fmt::format("Cannot find what the Identity node {} is linked to! {}",
572  nodeDef->name(),
573  CHECK_LOCATION().AsString()));
574  }
575 }
const tensorflow::NodeDef * ResolveIdentityNode(const tensorflow::NodeDef *nodeDef)
Handling identity layers as the input for Conv2D layer.
Definition: TfParser.cpp:546
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::unordered_map< std::string, const tensorflow::NodeDef * > m_NodesByName
Map of nodes extracted from the GraphDef to speed up parsing.
Definition: TfParser.hpp:265

◆ TrackBindingPoint()

void TrackBindingPoint ( armnn::IConnectableLayer *  layer,
armnn::LayerBindingId  id,
const armnn::TensorInfo &  tensorInfo,
const char *  bindingPointDesc,
std::unordered_map< std::string, BindingPointInfo > &  nameToBindingInfo 
)
static

Definition at line 3718 of file TfParser.cpp.

References CHECK_LOCATION, and IConnectableLayer::GetName().

Referenced by ITfParser::TfParserImpl::TrackInputBinding(), and ITfParser::TfParserImpl::TrackOutputBinding().

3723 {
3724  const std::string layerName = layer->GetName();
3725  auto it = nameToBindingInfo.find(layerName);
3726  if (it == nameToBindingInfo.end())
3727  {
3728  nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3729  }
3730  else
3731  {
3732  throw ParseException(
3733  fmt::format("Id {} used by more than one {} layer {}",
3734  id,
3735  bindingPointDesc,
3736  CHECK_LOCATION().AsString()));
3737  }
3738 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
virtual const char * GetName() const =0
Returns the name of the layer.

◆ TrackInputBinding()

void TrackInputBinding ( armnn::IConnectableLayer *  layer,
armnn::LayerBindingId  id,
const armnn::TensorInfo &  tensorInfo 
)

Definition at line 3704 of file TfParser.cpp.

References ITfParser::TfParserImpl::m_NetworkInputsBindingInfo, and ITfParser::TfParserImpl::TrackBindingPoint().

Referenced by ITfParser::TfParserImpl::ParsePlaceholder().

3707 {
3708  return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3709 }
static void TrackBindingPoint(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
Definition: TfParser.cpp:3718
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
Maps input layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:270

◆ TrackOutputBinding()

void TrackOutputBinding ( armnn::IConnectableLayer *  layer,
armnn::LayerBindingId  id,
const armnn::TensorInfo &  tensorInfo 
)

Definition at line 3711 of file TfParser.cpp.

References ITfParser::TfParserImpl::m_NetworkOutputsBindingInfo, and ITfParser::TfParserImpl::TrackBindingPoint().

Referenced by ITfParser::TfParserImpl::LoadNodeDef().

3714 {
3715  return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3716 }
static void TrackBindingPoint(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
Definition: TfParser.cpp:3718
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
Maps output layer names to their corresponding ids and tensor info.
Definition: TfParser.hpp:273

Member Data Documentation

◆ m_ControlInputs

const std::list< std::string > m_ControlInputs
static
Initial value:
= {
"Assert"
}

Definition at line 259 of file TfParser.hpp.

Referenced by ITfParser::TfParserImpl::LoadNodeDef().

◆ m_InputShapes

◆ m_Network

armnn::INetworkPtr m_Network

The network we're building. Gets cleared after it is passed to the user.

Definition at line 251 of file TfParser.hpp.

Referenced by ITfParser::TfParserImpl::AddActivationLayer(), ITfParser::TfParserImpl::AddAdditionLayer(), ITfParser::TfParserImpl::AddFullyConnectedLayer(), ITfParser::TfParserImpl::AddMaximumLayer(), ITfParser::TfParserImpl::AddMultiplicationLayer(), ITfParser::TfParserImpl::AddRealDivLayer(), ITfParser::TfParserImpl::CreateAdditionLayer(), ITfParser::TfParserImpl::CreateNetworkFromGraphDef(), ITfParser::TfParserImpl::LoadNodeDef(), ITfParser::TfParserImpl::ParseConcat(), ITfParser::TfParserImpl::ParseConv2D(), ITfParser::TfParserImpl::ParseDepthwiseConv2D(), ITfParser::TfParserImpl::ParseEqual(), ITfParser::TfParserImpl::ParseExpandDims(), ITfParser::TfParserImpl::ParseFusedBatchNorm(), ITfParser::TfParserImpl::ParseGather(), ITfParser::TfParserImpl::ParseGreater(), ITfParser::TfParserImpl::ParseLrn(), ITfParser::TfParserImpl::ParseMaximum(), ITfParser::TfParserImpl::ParseMean(), ITfParser::TfParserImpl::ParseMinimum(), ITfParser::TfParserImpl::ParsePad(), ITfParser::TfParserImpl::ParsePlaceholder(), ITfParser::TfParserImpl::ParsePooling2d(), ITfParser::TfParserImpl::ParseReshape(), ITfParser::TfParserImpl::ParseResizeBilinear(), ITfParser::TfParserImpl::ParseRsqrt(), ITfParser::TfParserImpl::ParseSoftmax(), ITfParser::TfParserImpl::ParseSplit(), ITfParser::TfParserImpl::ParseSqueeze(), ITfParser::TfParserImpl::ParseStack(), ITfParser::TfParserImpl::ParseStridedSlice(), ITfParser::TfParserImpl::ParseSub(), ITfParser::TfParserImpl::ParseTranspose(), and ITfParser::TfParserImpl::ProcessElementwiseInputSlots().

◆ m_NetworkInputsBindingInfo

std::unordered_map<std::string, BindingPointInfo> m_NetworkInputsBindingInfo

◆ m_NetworkOutputsBindingInfo

std::unordered_map<std::string, BindingPointInfo> m_NetworkOutputsBindingInfo

◆ m_NodesByName

std::unordered_map<std::string, const tensorflow::NodeDef*> m_NodesByName

Map of nodes extracted from the GraphDef to speed up parsing.

Definition at line 265 of file TfParser.hpp.

Referenced by ITfParser::TfParserImpl::Cleanup(), ITfParser::TfParserImpl::GetTfInputNodes(), ITfParser::TfParserImpl::LoadGraphDef(), and ITfParser::TfParserImpl::ResolveIdentityNode().

◆ m_ParsedTfOperations

◆ m_RequestedOutputs

◆ ms_OperationNameToParsingFunctions

const std::map< std::string, ITfParser::TfParserImpl::OperationParsingFunction > ms_OperationNameToParsingFunctions
static

Map of TensorFlow operation names to parsing member functions.

Definition at line 257 of file TfParser.hpp.

Referenced by ITfParser::TfParserImpl::LoadNodeDef().


The documentation for this struct was generated from the following files: