ArmNN
 21.02
ICaffeParser::CaffeParserImpl Class Referenceabstract

#include <CaffeParser.hpp>

Inheritance diagram for ICaffeParser::CaffeParserImpl:
CaffeParser RecordByRecordCaffeParser

Public Member Functions

armnn::INetworkPtr CreateNetworkFromTextFile (const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Create the network from a protobuf text file on disk. More...
 
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)=0
 Create the network from a protobuf binary file on the disk. More...
 
armnn::INetworkPtr CreateNetworkFromString (const char *protoText, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Creates the network directly from protobuf text in a string. Useful for debugging/testing. More...
 
BindingPointInfo GetNetworkInputBindingInfo (const std::string &name) const
 Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name. More...
 
BindingPointInfo GetNetworkOutputBindingInfo (const std::string &name) const
 Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name. More...
 
 CaffeParserImpl ()
 
virtual ~CaffeParserImpl ()=default
 

Static Public Member Functions

static const std::string GetVersion ()
 Retrieve version in X.Y.Z form. More...
 

Protected Types

using OperationParsingFunction = void(CaffeParserImpl::*)(const caffe::LayerParameter &layerParam)
 

Protected Member Functions

armnn::TensorInfo BlobShapeToTensorInfo (const caffe::BlobShape &blobShape) const
 Converts Caffe's protobuf tensor shape format to ArmNN's. More...
 
void TrackInputBinding (armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
 
void TrackOutputBinding (armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
 
void SetArmnnOutputSlotForCaffeTop (const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
 
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop (const std::string &caffeTopName) const
 Retrieves the Armnn IOutputSlot representing the given Caffe top. More...
 
void Cleanup ()
 
armnn::INetworkPtr CreateNetworkFromNetParameter (caffe::NetParameter &netParam, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
 Parses a NetParameter loaded into memory from one of the other CreateNetwork*. More...
 
void LoadNetParam (caffe::NetParameter &netParameter)
 does the actual conversion from caffe::NetParameter to armnn::INetwork More...
 
std::vector< const caffe::LayerParameter * > GetInputs (const caffe::LayerParameter &layerParam)
 Find the Caffe layers listed as inputs (bottoms) for a given layer. More...
 
void ResolveInPlaceLayers (caffe::NetParameter &netParameter)
 Modifies the Caffe network to replace "in-place" layers (whose top() and bottom() are both the same) with regular layers. More...
 
void ParseInputLayer (const caffe::LayerParameter &layerParam)
 Adds an armnn layer to m_Network given a Caffe LayerParameter of the correct type and is responsible for recording any newly created IOutputSlots using SetArmnnOutputSlotForCaffeTop(). More...
 
void ParseConvLayer (const caffe::LayerParameter &layerParam)
 
void ParseDeconvLayer (const caffe::LayerParameter &layerParam)
 
void ParsePoolingLayer (const caffe::LayerParameter &layerParam)
 
void ParseReluLayer (const caffe::LayerParameter &layerParam)
 
void ParseLRNLayer (const caffe::LayerParameter &layerParam)
 
void ParseInnerProductLayer (const caffe::LayerParameter &layerParam)
 
void ParseSoftmaxLayer (const caffe::LayerParameter &layerParam)
 
void ParseEltwiseLayer (const caffe::LayerParameter &layerParam)
 
void ParseConcatLayer (const caffe::LayerParameter &layerParam)
 
void ParseBatchNormLayer (const caffe::LayerParameter &layerParam)
 
void ParseScaleLayer (const caffe::LayerParameter &layerParam)
 
void ParseSplitLayer (const caffe::LayerParameter &layerParam)
 
void ParseDropoutLayer (const caffe::LayerParameter &layerParam)
 
void ParseArgmaxLayer (const caffe::LayerParameter &layerParam)
 
void AddConvLayerWithSplits (const caffe::LayerParameter &layerParam, const armnn::Convolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
 ParseConv may use these helpers depending on the group parameter. More...
 
void AddConvLayerWithDepthwiseConv (const caffe::LayerParameter &layerParam, const armnn::Convolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
 
void AddDeconvLayerWithSplits (const caffe::LayerParameter &layerParam, const armnn::TransposeConvolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
 

Static Protected Member Functions

static void TrackBindingPoint (armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
 
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo (const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &bindingInfos)
 

Protected Attributes

std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
 maps input layer names to their corresponding ids and tensor infos More...
 
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
 maps output layer names to their corresponding ids and tensor infos More...
 
armnn::INetworkPtr m_Network
 
std::map< std::string, armnn::TensorShape > m_InputShapes
 
std::unordered_map< std::string, armnn::IOutputSlot * > m_ArmnnOutputSlotForCaffeTop
 As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops. More...
 
std::vector< std::string > m_RequestedOutputs
 
std::map< std::string, const caffe::LayerParameter * > m_CaffeLayersByTopName
 

Static Protected Attributes

static const std::map< std::string, OperationParsingFunction > ms_CaffeLayerNameToParsingFunctions
 Maps Caffe layer names to parsing member functions. More...
 

Detailed Description

Definition at line 26 of file CaffeParser.hpp.

Member Typedef Documentation

◆ OperationParsingFunction

using OperationParsingFunction = void(CaffeParserImpl::*)(const caffe::LayerParameter& layerParam)
protected

Definition at line 131 of file CaffeParser.hpp.

Constructor & Destructor Documentation

◆ CaffeParserImpl()

Definition at line 310 of file CaffeParser.cpp.

311  : m_Network(nullptr, nullptr)
312 {
313 
314 }

◆ ~CaffeParserImpl()

virtual ~CaffeParserImpl ( )
virtualdefault

Member Function Documentation

◆ AddConvLayerWithDepthwiseConv()

void AddConvLayerWithDepthwiseConv ( const caffe::LayerParameter &  layerParam,
const armnn::Convolution2dDescriptor desc,
unsigned int  kernelW,
unsigned int  kernelH 
)
protected

Definition at line 825 of file CaffeParser.cpp.

References ARMNN_ASSERT, ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(), CHECK_LOCATION, IOutputSlot::Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), armnnUtils::GetTensorInfo(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, ICaffeParser::CaffeParserImpl::m_Network, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, armnn::numeric_cast(), ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and armnnCaffeParser::TensorDescToBlobShape().

Referenced by ICaffeParser::CaffeParserImpl::ParseConvLayer().

829 {
830  ARMNN_ASSERT(layerParam.type() == "Convolution");
831  ValidateNumInputsOutputs(layerParam, 1, 1);
832 
833  ConvolutionParameter convParam = layerParam.convolution_param();
834  BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
835 
837  desc.m_PadLeft = convDesc.m_PadLeft;
838  desc.m_PadRight = convDesc.m_PadRight;
839  desc.m_PadTop = convDesc.m_PadTop;
840  desc.m_PadBottom = convDesc.m_PadBottom;
841  desc.m_StrideX = convDesc.m_StrideX;
842  desc.m_StrideY = convDesc.m_StrideY;
843  desc.m_DilationX = convDesc.m_DilationX;
844  desc.m_DilationY = convDesc.m_DilationY;
845  desc.m_BiasEnabled = convDesc.m_BiasEnabled;
846 
847  unsigned int numFilters = convParam.num_output();
848 
849  BlobShape outputShape;
850  outputShape.add_dim(0);
851  outputShape.set_dim(0, inputShape.dim(0));
852  outputShape.add_dim(1);
853  outputShape.set_dim(1, numFilters);
854  outputShape.add_dim(2);
855  outputShape.set_dim(
856  2, (static_cast<int>(
857  static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - (desc.m_DilationX * (kernelH - 1) + 1)) /
858  static_cast<float>(desc.m_StrideY)) + 1));
859  outputShape.add_dim(3);
860  outputShape.set_dim(
861  3, (static_cast<int>(
862  static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - (desc.m_DilationY * (kernelW - 1) + 1)) /
863  static_cast<float>(desc.m_StrideX)) + 1));
864 
865  // Load the weight data
866  size_t allWeightsSize = armnn::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
867  vector<float> weightData(allWeightsSize);
868 
869  GetDataFromBlob(layerParam, weightData, 0);
870 
871  // depth multiplier will be 1 for the depthwise convolution
872  const unsigned int weightDimSizes[4] = {
873  static_cast<unsigned int>(1), // depth multiplier
874  static_cast<unsigned int>(inputShape.dim(1)), // #channels
875  kernelH,
876  kernelW};
877 
878  armnn::IConnectableLayer* returnLayer = nullptr;
879  ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
880  Optional<ConstTensor> optionalBiases;
881  vector<float> biasData;
882  if (desc.m_BiasEnabled)
883  {
884  TensorInfo biasInfo;
885 
886  biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
887  GetDataFromBlob(layerParam, biasData, 1);
888 
889  const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
890  biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
891 
892  ConstTensor biases(biasInfo, biasData.data());
893  optionalBiases = Optional<ConstTensor>(biases);
894  }
895  returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc,
896  weights,
897  optionalBiases,
898  layerParam.name().c_str());
899 
900  if (!returnLayer)
901  {
902  throw ParseException(
903  fmt::format("Failed to create depthwise convolution layer. "
904  "Layer={} #filters={} {}",
905  layerParam.name(),
906  numFilters,
907  CHECK_LOCATION().AsString()));
908  }
909  armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
910  inputConnection.Connect(returnLayer->GetInputSlot(0));
911  returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
912  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
913 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual int Connect(IInputSlot &destination)=0
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
BlobShape TensorDescToBlobShape(const TensorInfo &desc)

◆ AddConvLayerWithSplits()

void AddConvLayerWithSplits ( const caffe::LayerParameter &  layerParam,
const armnn::Convolution2dDescriptor desc,
unsigned int  kernelW,
unsigned int  kernelH 
)
protected

ParseConv may use these helpers depending on the group parameter.

Definition at line 447 of file CaffeParser.cpp.

References ARMNN_ASSERT, ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(), CHECK_LOCATION, IOutputSlot::Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, ICaffeParser::CaffeParserImpl::m_Network, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, armnn::numeric_cast(), ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), IOutputSlot::SetTensorInfo(), OriginsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnCaffeParser::TensorDescToBlobShape().

Referenced by ICaffeParser::CaffeParserImpl::ParseConvLayer().

451 {
452  ARMNN_ASSERT(layerParam.type() == "Convolution");
453  ValidateNumInputsOutputs(layerParam, 1, 1);
454 
455  ConvolutionParameter convParam = layerParam.convolution_param();
456  BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
457  const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
458 
459  // assume these were already verified by the caller ParseConvLayer() function
460  ARMNN_ASSERT(numGroups < inputShape.dim(1));
461  ARMNN_ASSERT(numGroups > 1);
462 
463  // Handle grouping
464  armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
465 
466  vector<string> convLayerNames(numGroups);
467  vector<armnn::IConnectableLayer*> convLayers(numGroups);
468  convLayerNames[0] = layerParam.name();
469 
470  // This convolution is to be applied to chunks of the input data so add a splitter layer
471 
472  // Redirect the convolution input to the splitter
473  unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)),
474  static_cast<unsigned int>(inputShape.dim(1)),
475  static_cast<unsigned int>(inputShape.dim(2)),
476  static_cast<unsigned int>(inputShape.dim(3))};
477 
478  // Split dimension 1 of the splitter output shape and conv input shapes
479  // according to the number of groups
480 
481  splitterDimSizes[1] /= numGroups;
482  inputShape.set_dim(1, splitterDimSizes[1]);
483 
484  // This is used to describe how the input is to be split
485  ViewsDescriptor splitterDesc(numGroups);
486 
487  // Create an output node for each group, giving each a unique name
488  for (unsigned int g = 0; g < numGroups; ++g)
489  {
490  // Work out the names of the splitter layers child convolutions
491  stringstream ss;
492  ss << layerParam.name() << "_" << g;
493  convLayerNames[g] = ss.str();
494 
495  splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g);
496 
497  // Set the size of the views.
498  for (unsigned int dimIdx=0; dimIdx < 4; dimIdx++)
499  {
500  splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
501  }
502  }
503 
504  const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
505  armnn::IConnectableLayer* splitterLayer = m_Network->AddSplitterLayer(splitterDesc, splitterLayerName.c_str());
506 
507  inputConnection.Connect(splitterLayer->GetInputSlot(0));
508  for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++)
509  {
510  splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape));
511  }
512 
513  unsigned int numFilters = convParam.num_output();
514 
515  // Populates convolution output tensor descriptor dimensions.
516  BlobShape outputShape;
517  outputShape.add_dim(0);
518  outputShape.set_dim(0, inputShape.dim(0));
519  outputShape.add_dim(1);
520  // Ensures that dimension 1 of the convolution output is split according to the number of groups.
521  outputShape.set_dim(1, numFilters / numGroups);
522  outputShape.add_dim(2);
523  outputShape.set_dim(
524  2, (static_cast<int>(
525  static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - (desc.m_DilationX * (kernelH - 1) + 1)) /
526  static_cast<float>(desc.m_StrideY)) + 1));
527  outputShape.add_dim(3);
528  outputShape.set_dim(
529  3, (static_cast<int>(
530  static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - (desc.m_DilationY * (kernelW - 1) + 1)) /
531  static_cast<float>(desc.m_StrideX)) + 1));
532 
533  // Load the weight data for ALL groups
534  vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
535  inputShape.dim(1) * // number of input channels
536  outputShape.dim(1) * // number of output channels
537  kernelH *
538  kernelW));
539  GetDataFromBlob(layerParam, weightData, 0);
540 
541  const unsigned int weightDimSizes[4] = {
542  static_cast<unsigned int>(outputShape.dim(1)),
543  static_cast<unsigned int>(inputShape.dim(1)),
544  kernelH,
545  kernelW};
546 
547  TensorInfo biasInfo;
548  vector<float> biasData;
549 
550  if (desc.m_BiasEnabled)
551  {
552  biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
553  GetDataFromBlob(layerParam, biasData, 1);
554 
555  const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
556  biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
557  }
558 
559  const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
560  const unsigned int numBiasesPerGroup = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;
561 
562  for (unsigned int g = 0; g < numGroups; ++g)
563  {
564  // Sets the slot index, group 0 should be connected to the 0th output of the splitter
565  // group 1 should be connected to the 1st output of the splitter.
566 
567  // Pulls out the weights for this group from that loaded from the model file earlier.
568  ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32),
569  weightData.data() + numWeightsPerGroup * g);
570 
571  IConnectableLayer* convLayer = nullptr;
572  Optional<ConstTensor> optionalBiases;
573  if (desc.m_BiasEnabled)
574  {
575  // Pulls out the biases for this group from that loaded from the model file earlier.
576  ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
577  optionalBiases = Optional<ConstTensor>(biases);
578  }
579  convLayer = m_Network->AddConvolution2dLayer(desc,
580  weights,
581  optionalBiases,
582  convLayerNames[g].c_str());
583  convLayers[g] = convLayer;
584 
585  // If we have more than one group then the input to the nth convolution is the splitter layer's nth output,
586  // otherwise it's the regular input to this layer.
587  armnn::IOutputSlot& splitterInputConnection =
588  splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
589  splitterInputConnection.Connect(convLayer->GetInputSlot(0));
590  convLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
591  }
592 
593  // If the convolution was performed in chunks, add a layer to concatenate the results
594 
595  // The merge input shape matches that of the convolution output
596  unsigned int concatDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)),
597  static_cast<unsigned int>(outputShape.dim(1)),
598  static_cast<unsigned int>(outputShape.dim(2)),
599  static_cast<unsigned int>(outputShape.dim(3))};
600 
601  // This is used to describe how the input is to be concatenated
602  OriginsDescriptor concatDesc(numGroups);
603 
604  // Now create an input node for each group, using the name from
605  // the output of the corresponding convolution
606  for (unsigned int g = 0; g < numGroups; ++g)
607  {
608  concatDesc.SetViewOriginCoord(g, 1, concatDimSizes[1] * g);
609  }
610 
611  // Make sure the output from the concat is the correct size to hold the data for all groups
612  concatDimSizes[1] *= numGroups;
613  outputShape.set_dim(1, concatDimSizes[1]);
614 
615  // Finally add the concat layer
616  IConnectableLayer* concatLayer = m_Network->AddConcatLayer(concatDesc, layerParam.name().c_str());
617 
618  if (!concatLayer)
619  {
620  throw ParseException(
621  fmt::format("Failed to create final concat layer for Split+Convolution+Concat. "
622  "Layer={} #groups={} #filters={} {}",
623  layerParam.name(),
624  numGroups,
625  numFilters,
626  CHECK_LOCATION().AsString()));
627  }
628 
629  for (unsigned int g = 0; g < numGroups; ++g)
630  {
631  convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
632  }
633  concatLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, concatDimSizes, DataType::Float32));
634  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
635 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationY
Dilation along y axis.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
An OriginsDescriptor for the ConcatLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
uint32_t m_DilationX
Dilation along x axis.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.
BlobShape TensorDescToBlobShape(const TensorInfo &desc)

◆ AddDeconvLayerWithSplits()

void AddDeconvLayerWithSplits ( const caffe::LayerParameter &  layerParam,
const armnn::TransposeConvolution2dDescriptor desc,
unsigned int  kernelW,
unsigned int  kernelH 
)
protected

Definition at line 637 of file CaffeParser.cpp.

References ARMNN_ASSERT, ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(), CHECK_LOCATION, IOutputSlot::Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), TransposeConvolution2dDescriptor::m_BiasEnabled, ICaffeParser::CaffeParserImpl::m_Network, TransposeConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideY, armnn::numeric_cast(), ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), IOutputSlot::SetTensorInfo(), OriginsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnCaffeParser::TensorDescToBlobShape().

Referenced by ICaffeParser::CaffeParserImpl::ParseDeconvLayer().

641 {
642  ARMNN_ASSERT(layerParam.type() == "Deconvolution");
643  ValidateNumInputsOutputs(layerParam, 1, 1);
644 
645  ConvolutionParameter convParam = layerParam.convolution_param();
646  BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
647  const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
648 
649  // assume these were already verified by the caller ParseDeconvLayer() function
650  ARMNN_ASSERT(numGroups <= inputShape.dim(1));
651  ARMNN_ASSERT(numGroups > 1);
652 
653  // Handle grouping
654  armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
655 
656  vector<string> convLayerNames(numGroups);
657  vector<armnn::IConnectableLayer*> convLayers(numGroups);
658  convLayerNames[0] = layerParam.name();
659 
660  // This deconvolution is to be applied to chunks of the input data so add a splitter layer
661 
662  // Redirect the deconvolution input to the splitter
663  unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)),
664  static_cast<unsigned int>(inputShape.dim(1)),
665  static_cast<unsigned int>(inputShape.dim(2)),
666  static_cast<unsigned int>(inputShape.dim(3))};
667 
668  // Split dimension 1 of the splitter output shape and deconv input shapes
669  // according to the number of groups
670 
671  splitterDimSizes[1] /= numGroups;
672  inputShape.set_dim(1, splitterDimSizes[1]);
673 
674  // This is used to describe how the input is to be split
675  ViewsDescriptor splitterDesc(numGroups);
676 
677  // Create an output node for each group, giving each a unique name
678  for (unsigned int g = 0; g < numGroups; ++g)
679  {
680  // Work out the names of the splitter layers child deconvolutions
681  stringstream ss;
682  ss << layerParam.name() << "_" << g;
683  convLayerNames[g] = ss.str();
684 
685  splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g);
686 
687  // Set the size of the views.
688  for (unsigned int dimIdx=0; dimIdx < 4; dimIdx++)
689  {
690  splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
691  }
692  }
693 
694  const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
695  armnn::IConnectableLayer* splitterLayer = m_Network->AddSplitterLayer(splitterDesc, splitterLayerName.c_str());
696 
697  inputConnection.Connect(splitterLayer->GetInputSlot(0));
698  for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++)
699  {
700  splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape));
701  }
702 
703  unsigned int numFilters = convParam.num_output();
704 
705  // Populates deconvolution output tensor descriptor dimensions.
706  BlobShape outputShape;
707  outputShape.add_dim(0);
708  outputShape.set_dim(0, inputShape.dim(0));
709  outputShape.add_dim(1);
710  // Ensures that dimension 1 of the deconvolution output is split according to the number of groups.
711  outputShape.set_dim(1, numFilters / numGroups);
712  outputShape.add_dim(2);
713  outputShape.set_dim(
714  2, (static_cast<int>(
715  desc.m_StrideY * (inputShape.dim(2) - 1) - 2 * desc.m_PadBottom + kernelH)));
716  outputShape.add_dim(3);
717  outputShape.set_dim(
718  3, (static_cast<int>(
719  desc.m_StrideX * (inputShape.dim(3) - 1) - 2 * desc.m_PadRight + kernelW)));
720 
721  // Load the weight data for ALL groups
722  vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
723  inputShape.dim(1) * // number of input channels
724  outputShape.dim(1) * // number of output channels
725  kernelH *
726  kernelW));
727  GetDataFromBlob(layerParam, weightData, 0);
728 
729  const unsigned int weightDimSizes[4] = {
730  static_cast<unsigned int>(outputShape.dim(1)),
731  static_cast<unsigned int>(inputShape.dim(1)),
732  kernelH,
733  kernelW};
734 
735  TensorInfo biasInfo;
736  vector<float> biasData;
737 
738  if (desc.m_BiasEnabled)
739  {
740  biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
741  GetDataFromBlob(layerParam, biasData, 1);
742 
743  const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
744  biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
745  }
746 
747  const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
748  const unsigned int numBiasesPerGroup = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;
749 
750  for (unsigned int g = 0; g < numGroups; ++g)
751  {
752  // Sets the slot index, group 0 should be connected to the 0th output of the splitter
753  // group 1 should be connected to the 1st output of the splitter.
754 
755  // Pulls out the weights for this group from that loaded from the model file earlier.
756  ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32),
757  weightData.data() + numWeightsPerGroup * g);
758 
759  IConnectableLayer* deconvLayer = nullptr;
760  Optional<ConstTensor> optionalBiases;
761  if (desc.m_BiasEnabled)
762  {
763  // Pulls out the biases for this group from that loaded from the model file earlier.
764  ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
765  optionalBiases = Optional<ConstTensor>(biases);
766  }
767  deconvLayer = m_Network->AddTransposeConvolution2dLayer(desc,
768  weights,
769  optionalBiases,
770  convLayerNames[g].c_str());
771  convLayers[g] = deconvLayer;
772 
773  // If we have more than one group then the input to the nth deconvolution is the splitter layer's nth output,
774  // otherwise it's the regular input to this layer.
775  armnn::IOutputSlot& splitterInputConnection =
776  splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
777  splitterInputConnection.Connect(deconvLayer->GetInputSlot(0));
778  deconvLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
779  }
780 
781  // If the deconvolution was performed in chunks, add a layer to concatenate the results
782 
783  // The merge input shape matches that of the deconvolution output
784  unsigned int concatDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)),
785  static_cast<unsigned int>(outputShape.dim(1)),
786  static_cast<unsigned int>(outputShape.dim(2)),
787  static_cast<unsigned int>(outputShape.dim(3))};
788 
789  // This is used to describe how the input is to be concatenated
790  OriginsDescriptor concatDesc(numGroups);
791 
792  // Now create an input node for each group, using the name from
793  // the output of the corresponding deconvolution
794  for (unsigned int g = 0; g < numGroups; ++g)
795  {
796  concatDesc.SetViewOriginCoord(g, 1, concatDimSizes[1] * g);
797  }
798 
799  // Make sure the output from the concat is the correct size to hold the data for all groups
800  concatDimSizes[1] *= numGroups;
801  outputShape.set_dim(1, concatDimSizes[1]);
802 
803  // Finally add the concat layer
804  IConnectableLayer* concatLayer = m_Network->AddConcatLayer(concatDesc, layerParam.name().c_str());
805 
806  if (!concatLayer)
807  {
808  throw ParseException(
809  fmt::format("Failed to create final concat layer for Split+Deconvolution+Concat. "
810  "Layer={} #groups={} #filters={} {}",
811  layerParam.name(),
812  numGroups,
813  numFilters,
814  CHECK_LOCATION().AsString()));
815  }
816 
817  for (unsigned int g = 0; g < numGroups; ++g)
818  {
819  convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
820  }
821  concatLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, concatDimSizes, DataType::Float32));
822  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
823 }
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
An OriginsDescriptor for the ConcatLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
uint32_t m_PadRight
Padding right value in the width dimension.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.
BlobShape TensorDescToBlobShape(const TensorInfo &desc)

◆ BlobShapeToTensorInfo()

TensorInfo BlobShapeToTensorInfo ( const caffe::BlobShape &  blobShape) const
protected

Converts Caffe's protobuf tensor shape format to ArmNN's.

Definition at line 349 of file CaffeParser.cpp.

Referenced by ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(), ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(), ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(), ICaffeParser::CaffeParserImpl::ParseConvLayer(), ICaffeParser::CaffeParserImpl::ParseDeconvLayer(), and ICaffeParser::CaffeParserImpl::ParseInputLayer().

350 {
351  std::vector<unsigned int> shape;
352  for (int j = 0; j < blobShape.dim_size(); ++j)
353  {
354  shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
355  }
356 
357  return TensorInfo(armnn::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
358 }

◆ Cleanup()

void Cleanup ( )
protected

Definition at line 2261 of file CaffeParser.cpp.

References ICaffeParser::CaffeParserImpl::m_ArmnnOutputSlotForCaffeTop, ICaffeParser::CaffeParserImpl::m_CaffeLayersByTopName, ICaffeParser::CaffeParserImpl::m_InputShapes, and ICaffeParser::CaffeParserImpl::m_RequestedOutputs.

Referenced by RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(), and ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter().

2261  {
2262  // cleanup, in case we reuse this parser
2263  m_InputShapes.clear();
2264  m_RequestedOutputs.clear();
2266  // NOTE: when we get the text/string format
2267  // optimised for memory then this data structure can
2268  // also move to the CaffeParser class
2269  m_CaffeLayersByTopName.clear();
2270 }
std::map< std::string, armnn::TensorShape > m_InputShapes
std::unordered_map< std::string, armnn::IOutputSlot * > m_ArmnnOutputSlotForCaffeTop
As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops...
std::map< std::string, const caffe::LayerParameter * > m_CaffeLayersByTopName

◆ CreateNetworkFromBinaryFile()

virtual armnn::INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)
pure virtual

Create the network from a protobuf binary file on the disk.

Implemented in CaffeParser, and RecordByRecordCaffeParser.

◆ CreateNetworkFromNetParameter()

INetworkPtr CreateNetworkFromNetParameter ( caffe::NetParameter &  netParam,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)
protected

Parses a NetParameter loaded into memory from one of the other CreateNetwork*.

Definition at line 2225 of file CaffeParser.cpp.

References ICaffeParser::CaffeParserImpl::Cleanup(), INetwork::Create(), ICaffeParser::CaffeParserImpl::LoadNetParam(), ICaffeParser::CaffeParserImpl::m_InputShapes, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::m_NetworkInputsBindingInfo, ICaffeParser::CaffeParserImpl::m_NetworkOutputsBindingInfo, and ICaffeParser::CaffeParserImpl::m_RequestedOutputs.

Referenced by CaffeParser::CreateNetworkFromBinaryFile(), ICaffeParser::CaffeParserImpl::CreateNetworkFromString(), and ICaffeParser::CaffeParserImpl::CreateNetworkFromTextFile().

2228 {
2231 
2233 
2234  m_InputShapes = inputShapes;
2235  if (requestedOutputs.size() == 0)
2236  {
2237  throw ParseException("requestedOutputs must have at least one entry");
2238  }
2239  m_RequestedOutputs = requestedOutputs;
2240 
2241  try
2242  {
2243  LoadNetParam(netParam);
2244  }
2245  catch (const ParseException& e)
2246  {
2247  Cleanup();
2248  throw e;
2249  }
2250 
2251  Cleanup();
2252 
2253  return move(m_Network);
2254 }
std::map< std::string, armnn::TensorShape > m_InputShapes
void LoadNetParam(caffe::NetParameter &netParameter)
does the actual conversion from caffe::NetParameter to armnn::INetwork
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
maps output layer names to their corresponding ids and tensor infos
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
maps input layer names to their corresponding ids and tensor infos
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ CreateNetworkFromString()

INetworkPtr CreateNetworkFromString ( const char *  protoText,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)

Creates the network directly from protobuf text in a string. Useful for debugging/testing.

Definition at line 2171 of file CaffeParser.cpp.

References CHECK_LOCATION, and ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter().

2174 {
2175  // Parses the string into a message.
2176  NetParameter netParam;
2177  bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam);
2178 
2179  if (!success)
2180  {
2181  throw ParseException(
2182  fmt::format("Failed to parse graph string {}",
2183  CHECK_LOCATION().AsString()));
2184  }
2185 
2186  return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
2187 }
armnn::INetworkPtr CreateNetworkFromNetParameter(caffe::NetParameter &netParam, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a NetParameter loaded into memory from one of the other CreateNetwork*.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ CreateNetworkFromTextFile()

INetworkPtr CreateNetworkFromTextFile ( const char *  graphFile,
const std::map< std::string, armnn::TensorShape > &  inputShapes,
const std::vector< std::string > &  requestedOutputs 
)

Create the network from a protobuf text file on disk.

Definition at line 2139 of file CaffeParser.cpp.

References CHECK_LOCATION, and ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter().

2142 {
2143  FILE* fd = fopen(graphFile, "r");
2144 
2145  if (fd == nullptr)
2146  {
2147  throw FileNotFoundException(
2148  fmt::format("Failed to open graph file: {} {}",
2149  graphFile,
2150  CHECK_LOCATION().AsString()));
2151  }
2152 
2153  // Parses the file into a message.
2154  NetParameter netParam;
2155  auto input = new google::protobuf::io::FileInputStream(fileno(fd));
2156  bool success = google::protobuf::TextFormat::Parse(input, &netParam);
2157  delete input;
2158  fclose(fd);
2159 
2160  if (!success)
2161  {
2162  throw ParseException(
2163  fmt::format("Failed to parse graph file: {} {}",
2164  graphFile,
2165  CHECK_LOCATION().AsString()));
2166  }
2167 
2168  return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
2169 }
armnn::INetworkPtr CreateNetworkFromNetParameter(caffe::NetParameter &netParam, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a NetParameter loaded into memory from one of the other CreateNetwork*.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetArmnnOutputSlotForCaffeTop()

armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop ( const std::string &  caffeTopName) const
protected

Retrieves the Armnn IOutputSlot representing the given Caffe top.

Throws if it cannot be found (e.g. not parsed yet).

Definition at line 1952 of file CaffeParser.cpp.

References CHECK_LOCATION, and ICaffeParser::CaffeParserImpl::m_ArmnnOutputSlotForCaffeTop.

Referenced by ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(), ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(), ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(), RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(), ICaffeParser::CaffeParserImpl::LoadNetParam(), ICaffeParser::CaffeParserImpl::ParseArgmaxLayer(), ICaffeParser::CaffeParserImpl::ParseBatchNormLayer(), ICaffeParser::CaffeParserImpl::ParseConcatLayer(), ICaffeParser::CaffeParserImpl::ParseConvLayer(), ICaffeParser::CaffeParserImpl::ParseDeconvLayer(), ICaffeParser::CaffeParserImpl::ParseDropoutLayer(), ICaffeParser::CaffeParserImpl::ParseEltwiseLayer(), ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(), ICaffeParser::CaffeParserImpl::ParseLRNLayer(), ICaffeParser::CaffeParserImpl::ParsePoolingLayer(), ICaffeParser::CaffeParserImpl::ParseReluLayer(), ICaffeParser::CaffeParserImpl::ParseScaleLayer(), ICaffeParser::CaffeParserImpl::ParseSoftmaxLayer(), and ICaffeParser::CaffeParserImpl::ParseSplitLayer().

1953 {
1954  auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
1955  if (it != m_ArmnnOutputSlotForCaffeTop.end())
1956  {
1957  return *it->second;
1958  }
1959  else
1960  {
1961  throw ParseException(
1962  fmt::format("Could not find armnn output slot for Caffe top '{}' {}",
1963  caffeTopName,
1964  CHECK_LOCATION().AsString()));
1965  }
1966 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::unordered_map< std::string, armnn::IOutputSlot * > m_ArmnnOutputSlotForCaffeTop
As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops...

◆ GetBindingInfo()

std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo ( const std::string &  layerName,
const char *  bindingPointDesc,
const std::unordered_map< std::string, BindingPointInfo > &  bindingInfos 
)
staticprotected

Definition at line 332 of file CaffeParser.cpp.

References CHECK_LOCATION.

Referenced by ICaffeParser::CaffeParserImpl::GetNetworkInputBindingInfo(), and ICaffeParser::CaffeParserImpl::GetNetworkOutputBindingInfo().

336 {
337  auto it = nameToBindingInfo.find(layerName);
338  if (it == nameToBindingInfo.end())
339  {
341  fmt::format("Unknown binding {} for layer '{}'. {}",
342  bindingPointDesc,
343  layerName,
344  CHECK_LOCATION().AsString()));
345  }
346  return it->second;
347 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetInputs()

vector< const LayerParameter * > GetInputs ( const caffe::LayerParameter &  layerParam)
protected

Find the Caffe layers listed as inputs (bottoms) for a given layer.

Definition at line 374 of file CaffeParser.cpp.

References CHECK_LOCATION, and ICaffeParser::CaffeParserImpl::m_CaffeLayersByTopName.

Referenced by ICaffeParser::CaffeParserImpl::LoadNetParam().

375 {
376  std::vector<const caffe::LayerParameter*> ret;
377  ret.reserve(armnn::numeric_cast<size_t>(layerParam.bottom_size()));
378  for (int j = 0; j < layerParam.bottom_size(); ++j)
379  {
380  std::string inputName = layerParam.bottom(j);
381  auto inputIt = m_CaffeLayersByTopName.find(inputName);
382  if (inputIt == m_CaffeLayersByTopName.end())
383  {
384  throw ParseException(
385  fmt::format("Can't find Caffe layer with top called '{}', "
386  "which is listed as an input of '{}'. {}",
387  inputName,
388  layerParam.name(),
389  CHECK_LOCATION().AsString()));
390  }
391  ret.push_back(inputIt->second);
392  }
393 
394  return ret;
395 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::map< std::string, const caffe::LayerParameter * > m_CaffeLayersByTopName

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( const std::string &  name) const

Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.

Definition at line 322 of file CaffeParser.cpp.

References ICaffeParser::CaffeParserImpl::GetBindingInfo(), and ICaffeParser::CaffeParserImpl::m_NetworkInputsBindingInfo.

323 {
324  return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
325 }
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo(const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &bindingInfos)
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
maps input layer names to their corresponding ids and tensor infos

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( const std::string &  name) const

Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.

Definition at line 327 of file CaffeParser.cpp.

References ICaffeParser::CaffeParserImpl::GetBindingInfo(), and ICaffeParser::CaffeParserImpl::m_NetworkOutputsBindingInfo.

328 {
329  return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
330 }
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo(const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &bindingInfos)
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
maps output layer names to their corresponding ids and tensor infos

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 2256 of file CaffeParser.cpp.

References CAFFE_PARSER_VERSION.

2257 {
2258  return CAFFE_PARSER_VERSION;
2259 }
#define CAFFE_PARSER_VERSION
CAFFE_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch vers...
Definition: Version.hpp:25

◆ LoadNetParam()

void LoadNetParam ( caffe::NetParameter &  netParameter)
protected

does the actual conversion from caffe::NetParameter to armnn::INetwork

Definition at line 2043 of file CaffeParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), ICaffeParser::CaffeParserImpl::GetInputs(), ICaffeParser::CaffeParserImpl::m_CaffeLayersByTopName, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::m_NetworkOutputsBindingInfo, ICaffeParser::CaffeParserImpl::m_RequestedOutputs, ICaffeParser::CaffeParserImpl::ms_CaffeLayerNameToParsingFunctions, armnn::numeric_cast(), ICaffeParser::CaffeParserImpl::ResolveInPlaceLayers(), and ICaffeParser::CaffeParserImpl::TrackOutputBinding().

Referenced by ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter().

2044 {
2045  // Caffe models sometimes have an implicit input layer.
2046  // In that case, add an explicit one.
2047  if (netParameter.input_size() > 0)
2048  {
2049  LayerParameter* newLayer = netParameter.add_layer();
2050 
2051  newLayer->set_type("Input");
2052  newLayer->set_name(netParameter.input(0));
2053  newLayer->add_top(netParameter.input(0));
2054 
2055  InputParameter* inputParam = newLayer->mutable_input_param();
2056  BlobShape* shape = inputParam->add_shape();
2057 
2058  int dim_size = netParameter.input_dim_size();
2059  for (int i = 0; i < dim_size; ++i)
2060  {
2061  shape->add_dim(netParameter.input_dim(i));
2062  }
2063  }
2064 
2065  // Replaces in-place layers with regular ones to make the rest of the parsing easier.
2066  ResolveInPlaceLayers(netParameter);
2067 
2068  // Creates a lookup of Caffe layers by name.
2069  for (int i = 0; i < netParameter.layer_size(); ++i)
2070  {
2071  const caffe::LayerParameter& layer = netParameter.layer(i);
2072  for (int i = 0; i < layer.top_size(); ++i)
2073  {
2074  m_CaffeLayersByTopName[layer.top(i)] = &layer;
2075  }
2076  }
2077 
2078  // Finds the output layers the user requested.
2079  std::vector<const caffe::LayerParameter*> targetLayers;
2080  for (const std::string& requestedOutputName : m_RequestedOutputs)
2081  {
2082  auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
2083  if (nodeIt == m_CaffeLayersByTopName.end())
2084  {
2085  throw ParseException(
2086  fmt::format("Couldn't find requested output layer '{}' in graph {}",
2087  requestedOutputName,
2088  CHECK_LOCATION().AsString()));
2089  }
2090  targetLayers.push_back(nodeIt->second);
2091  }
2092 
2093  // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
2094  std::vector<const caffe::LayerParameter*> sortedNodes;
2095  if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>(
2096  targetLayers,
2097  [this](const caffe::LayerParameter* node)
2098  {
2099  return GetInputs(*node);
2100  },
2101  sortedNodes))
2102  {
2103  throw ParseException(
2104  fmt::format("Cycle detected in graph. #nodes: {} {}",
2105  sortedNodes.size(),
2106  CHECK_LOCATION().AsString()));
2107  }
2108 
2109  // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
2110  for (const caffe::LayerParameter* current : sortedNodes)
2111  {
2112  auto it = ms_CaffeLayerNameToParsingFunctions.find(current->type());
2113  if (it == ms_CaffeLayerNameToParsingFunctions.end())
2114  {
2115  throw ParseException(
2116  fmt::format("Unsupported layer type: '{}' for layer {} {}",
2117  current->type(),
2118  current->name(),
2119  CHECK_LOCATION().AsString()));
2120  }
2121  auto func = it->second;
2122  (this->*func)(*current);
2123  }
2124 
2125  // Adds ArmNN output layers connected to each requested output.
2126  for (const std::string& requestedOutput : m_RequestedOutputs)
2127  {
2128  armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
2129 
2132  armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
2133  outputSlot.Connect(outputLayer->GetInputSlot(0));
2134 
2135  TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
2136  }
2137 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void TrackOutputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
maps output layer names to their corresponding ids and tensor infos
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
static const std::map< std::string, OperationParsingFunction > ms_CaffeLayerNameToParsingFunctions
Maps Caffe layer names to parsing member functions.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void ResolveInPlaceLayers(caffe::NetParameter &netParameter)
Modifies the Caffe network to replace "in-place" layers (whose top() and bottom() are both the same) ...
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
virtual int Connect(IInputSlot &destination)=0
std::vector< const caffe::LayerParameter * > GetInputs(const caffe::LayerParameter &layerParam)
Find the Caffe layers listed as inputs (bottoms) for a given layer.
std::map< std::string, const caffe::LayerParameter * > m_CaffeLayersByTopName

◆ ParseArgmaxLayer()

void ParseArgmaxLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1382 of file CaffeParser.cpp.

References CHECK_LOCATION, Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), ArgMinMaxDescriptor::m_Axis, ArgMinMaxDescriptor::m_Function, ICaffeParser::CaffeParserImpl::m_Network, ArgMinMaxDescriptor::m_Output_Type, armnn::Max, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), IOutputSlot::SetTensorInfo(), armnn::Signed32, and armnnCaffeParser::TensorDescToBlobShape().

1383 {
1384  ValidateNumInputsOutputs(layerParam, 1, 1);
1385  ArgMaxParameter param = layerParam.argmax_param();
1386 
1387  BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
1388 
1389  const unsigned int topK = param.has_top_k() ? param.top_k() : 1;
1390  if (topK != 1) {
1391  throw ParseException(
1392  fmt::format("ArgMaxLayer: Only support top_k equals to 1. Layer={} {}",
1393  layerParam.name(),
1394  CHECK_LOCATION().AsString()));
1395  }
1396 
1397  const unsigned int outMaxVal = param.has_out_max_val() ? param.out_max_val() : false;
1398  if (outMaxVal) {
1399  throw ParseException(
1400  fmt::format("ArgMaxLayer: Does not support out_max_val. Layer={} {}",
1401  layerParam.name(),
1402  CHECK_LOCATION().AsString()));
1403  }
1404 
1405  int axis = param.has_axis() ? param.axis() : 1;
1406  if (axis < 0) {
1407  axis = inputShape.dim_size() - axis;
1408  }
1409  if ((axis < 0) || (axis >= inputShape.dim_size())) {
1410  throw ParseException(
1411  fmt::format("ArgMaxLayer: Invalid axis value which outside range of input dims. "
1412  "{}'s input has input dim_size {}, requested axis: {}. {}",
1413  layerParam.name(),
1414  inputShape.dim_size(),
1415  axis,
1416  CHECK_LOCATION().AsString()));
1417  }
1418 
1419  ArgMinMaxDescriptor desc;
1420  desc.m_Axis = axis;
1423 
1424  armnn::IConnectableLayer* argmaxLayer = m_Network->AddArgMinMaxLayer(desc,
1425  layerParam.name().c_str());
1426 
1427  TensorShape outputShape(static_cast<unsigned int>(inputShape.dim_size() - 1));
1428  int j = 0;
1429  // remove the flatten axis
1430  for (int i = 0; i < inputShape.dim_size(); ++i)
1431  {
1432  if (i == axis) continue;
1433  outputShape[static_cast<unsigned int>(j++)] = static_cast<unsigned int>(inputShape.dim(i));
1434  }
1435  TensorInfo outputInfo(outputShape, DataType::Signed32);
1436 
1437  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(argmaxLayer->GetInputSlot(0));
1438  argmaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1439  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), argmaxLayer->GetOutputSlot(0));
1440 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:70
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:72
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
armnn::DataType m_Output_Type
Definition: Descriptors.hpp:74
BlobShape TensorDescToBlobShape(const TensorInfo &desc)

◆ ParseBatchNormLayer()

void ParseBatchNormLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1767 of file CaffeParser.cpp.

References CHECK_LOCATION, Connect(), armnn::Float32, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), BatchNormalizationDescriptor::m_Eps, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1768 {
1769  ValidateNumInputsOutputs(layerParam, 1, 1);
1770 
1771  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1772 
1773  string name = layerParam.name();
1774 
1775  BatchNormParameter param = layerParam.batch_norm_param();
1776  // If use_global_stats is not explicitly set in the model, assume it to be true (its default value
1777  // when the network is in the testing phase).
1778  if (param.has_use_global_stats())
1779  {
1780  if (!param.use_global_stats())
1781  {
1782  throw ParseException(
1783  fmt::format("Error parsing Batch Norm layer '{}': "
1784  "Parameter 'use_global_stats' is set to false, which is "
1785  "unsupported (value used for training). {}",
1786  name,
1787  CHECK_LOCATION().AsString()));
1788  }
1789  }
1790 
1792  desc.m_Eps = param.eps();
1793 
1794  unsigned int channels = inputInfo.GetShape()[1];
1795  unsigned int shape[] = {channels};
1796 
1797  vector<float> meanData(channels);
1798  GetDataFromBlob(layerParam, meanData, 0);
1799 
1800  vector<float> varianceData(channels);
1801  GetDataFromBlob(layerParam, varianceData, 1);
1802 
1803  // Reads moving average factor and applies scaling (if required).
1804  const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(2));
1805  const float movingAverageFactor = blob.data(armnn::numeric_cast<int>(0));
1806  if(movingAverageFactor != 0.0f)
1807  {
1808  const float scaleFactor = 1.0f / movingAverageFactor;
1809  auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };
1810 
1811  std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
1812  std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
1813  }
1814 
1815  // Identifies scale operation.
1816  vector<float> betaData(channels, 0.0f);
1817  vector<float> gammaData(channels, 1.0f);
1818 
1819  ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
1820  ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
1821  ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
1822  ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);
1823 
1824  armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
1825  mean, variance, beta, gamma, name.c_str());
1826  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
1827  batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1828  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
1829 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
A BatchNormalizationDescriptor for the BatchNormalizationLayer.

◆ ParseConcatLayer()

void ParseConcatLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1708 of file CaffeParser.cpp.

References CHECK_LOCATION, IOutputSlot::Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), IOutputSlot::SetTensorInfo(), and OriginsDescriptor::SetViewOriginCoord().

1709 {
1710  unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size());
1711  // We assume concat happens along the channel dimension, which is 1 in (0, 1, 2, 3).
1712  unsigned int concatDim = 1;
1713  unsigned int numOfDims = 4;
1714 
1715  // we only consider 4-D tensor here
1716  OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);
1717  std::vector<unsigned int>mergeDimSizes(numOfDims, 0u);
1718 
1719  unsigned int mergeDim = 0;
1720  for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
1721  {
1722  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
1723  layerParam.bottom(armnn::numeric_cast<int>(viewIndex))).GetTensorInfo();
1724  // Checks whether the dimensions of the input tensors are actually 4.
1725  if (inputInfo.GetNumDimensions()!=4)
1726  {
1727  throw ParseException(
1728  fmt::format("The number of dimensions for input tensors of "
1729  "the concatenation op should be 4. Inputs of {} has "
1730  "{} dimensions. {}",
1731  layerParam.name(),
1732  inputInfo.GetNumDimensions(),
1733  CHECK_LOCATION().AsString()));
1734  }
1735 
1736  mergeDimSizes[0] = inputInfo.GetShape()[0];
1737  mergeDimSizes[1] = inputInfo.GetShape()[1];
1738  mergeDimSizes[2] = inputInfo.GetShape()[2];
1739  mergeDimSizes[3] = inputInfo.GetShape()[3];
1740 
1741  for (unsigned int j = 0; j < concatDim; ++j)
1742  {
1743  concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
1744  }
1745 
1746  concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
1747  mergeDim += mergeDimSizes[concatDim];
1748 
1749  for (unsigned int j = concatDim+1; j < numOfDims; ++j)
1750  {
1751  concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
1752  }
1753  }
1754  mergeDimSizes[concatDim] = mergeDim;
1755 
1756  armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
1757  for (unsigned int i = 0; i < numInputs; ++i)
1758  {
1759  armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(armnn::numeric_cast<int>(i)));
1760  outputSlot.Connect(concatlayer->GetInputSlot(i));
1761  }
1762 
1763  concatlayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(numOfDims, mergeDimSizes.data(), DataType::Float32));
1764  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatlayer->GetOutputSlot(0));
1765 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
An OriginsDescriptor for the ConcatLayer.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseConvLayer()

void ParseConvLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 915 of file CaffeParser.cpp.

References ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(), ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(), ARMNN_ASSERT, ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(), CHECK_LOCATION, IOutputSlot::Connect(), GET_OPTIONAL_WITH_VECTOR_FALLBACK, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), armnnUtils::GetTensorInfo(), Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, ICaffeParser::CaffeParserImpl::m_Network, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and armnnCaffeParser::TensorDescToBlobShape().

916 {
917  // Ignored Caffe Parameters
918  // * Weight Filler
919  // * Bias Filler
920  // * Engine
921  // * Force nd_im2col
922  // * Axis
923 
924  // Not Available ArmNN Interface Parameters
925  // * Rounding policy;
926 
927  ARMNN_ASSERT(layerParam.type() == "Convolution");
928  ValidateNumInputsOutputs(layerParam, 1, 1);
929 
930  ConvolutionParameter convParam = layerParam.convolution_param();
931  BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
932  const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
933  unsigned int numFilters = convParam.num_output();
934 
935  const auto notFound = std::numeric_limits<unsigned int>::max();
936 
937  unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
938  kernel_h, kernel_size, unsigned int, notFound);
939  unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
940  kernel_w, kernel_size, unsigned int, notFound);
941 
942  unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
943  stride_h, stride, unsigned int, 1u);
944  unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
945  stride_w, stride, unsigned int, 1u);
946 
947  unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
948  pad_h, pad, unsigned int, 0u);
949  unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
950  pad_w, pad, unsigned int, 0u);
951 
952  unsigned int dilationH = convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
953  unsigned int dilationW = convParam.dilation_size() > 1 ? convParam.dilation(1) :
954  convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
955 
956  Convolution2dDescriptor convolution2dDescriptor;
957  convolution2dDescriptor.m_PadLeft = padW;
958  convolution2dDescriptor.m_PadRight = padW;
959  convolution2dDescriptor.m_PadTop = padH;
960  convolution2dDescriptor.m_PadBottom = padH;
961  convolution2dDescriptor.m_StrideX = strideW;
962  convolution2dDescriptor.m_StrideY = strideH;
963  convolution2dDescriptor.m_DilationX = dilationW;
964  convolution2dDescriptor.m_DilationY = dilationH;
965  convolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;
966 
967  if (numGroups > numFilters)
968  {
969  throw ParseException(
970  fmt::format("Error parsing Convolution: {}. "
971  "The 'group'={} parameter cannot be larger than the "
972  "number of filters supplied ='{}'. {}",
973  layerParam.name(),
974  numGroups,
975  numFilters,
976  CHECK_LOCATION().AsString()));
977  }
978 
979  if (inputShape.dim_size() != 4)
980  {
981  throw ParseException(
982  fmt::format("Convolution input shape is expected to have 4 dimensions. "
983  "{}'s input has only {}. {}",
984  layerParam.name(),
985  inputShape.dim_size(),
986  CHECK_LOCATION().AsString()));
987  }
988 
989  if (numGroups > 1)
990  {
991  if (numGroups > inputShape.dim(1))
992  {
993  throw ParseException(
994  fmt::format("Error parsing Convolution: {}. "
995  "The 'group'={} parameter cannot be larger than the "
996  "channel of the input shape={} (in NCHW format). {}",
997  layerParam.name(),
998  numGroups,
999  inputShape.dim(1),
1000  CHECK_LOCATION().AsString()));
1001  }
1002  else if (numGroups == inputShape.dim(1))
1003  {
1004  // we use a depthwise convolution here, because the number of groups equals to the
1005  // input channels
1006  AddConvLayerWithDepthwiseConv(layerParam, convolution2dDescriptor, kernelW, kernelH);
1007  return;
1008  }
1009  else
1010  {
1011  // we split the input by channels into channels/groups separate convolutions
1012  // and concatenate the results afterwards
1013  AddConvLayerWithSplits(layerParam, convolution2dDescriptor, kernelW, kernelH);
1014  return;
1015  }
1016  }
1017 
1018  // NOTE: at this point we only need to handle #group=1 case, all other cases should be
1019  // handled by the AddConvLayer* helpers
1020 
1021  // Populate convolution output tensor descriptor dimensions
1022  BlobShape outputShape;
1023  outputShape.add_dim(0);
1024  outputShape.set_dim(0, inputShape.dim(0));
1025  outputShape.add_dim(1);
1026  outputShape.set_dim(1, numFilters);
1027  outputShape.add_dim(2);
1028  outputShape.set_dim(
1029  2, (static_cast<int>(
1030  static_cast<float>(inputShape.dim(2) + 2 * padH - (dilationH * (kernelH - 1) + 1)) /
1031  static_cast<float>(strideH)) + 1));
1032  outputShape.add_dim(3);
1033  outputShape.set_dim(
1034  3, (static_cast<int>(
1035  static_cast<float>(inputShape.dim(3) + 2 * padW - (dilationW * (kernelW - 1) + 1)) /
1036  static_cast<float>(strideW)) + 1));
1037 
1038  // Load the weight data for ALL groups
1039  vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
1040  outputShape.dim(1) *
1041  kernelH *
1042  kernelW));
1043  GetDataFromBlob(layerParam, weightData, 0);
1044 
1045  const unsigned int weightDimSizes[4] = {
1046  static_cast<unsigned int>(outputShape.dim(1)), // output channels
1047  static_cast<unsigned int>(inputShape.dim(1)), // input channels
1048  kernelH,
1049  kernelW};
1050 
1051  armnn::IConnectableLayer* returnLayer = nullptr;
1052 
1053  // Pull out the weights for this group from that loaded from the model file earlier
1054  ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
1055  Optional<ConstTensor> optionalBiases;
1056  vector<float> biasData;
1057  if (convolution2dDescriptor.m_BiasEnabled)
1058  {
1059  TensorInfo biasInfo;
1060 
1061  biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
1062  GetDataFromBlob(layerParam, biasData, 1);
1063 
1064  const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
1065  biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
1066 
1067  // Pull out the biases for this group from that loaded from the model file earlier
1068  ConstTensor biases(biasInfo, biasData.data());
1069  optionalBiases = Optional<ConstTensor>(biases);
1070  }
1071  returnLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
1072  weights,
1073  optionalBiases,
1074  layerParam.name().c_str());
1075 
1076  armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
1077  inputConnection.Connect(returnLayer->GetInputSlot(0));
1078  returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
1079 
1080  if (!returnLayer)
1081  {
1082  throw ParseException(
1083  fmt::format("Failed to create Convolution layer. "
1084  "Layer={} #groups={} #filters={} {}",
1085  layerParam.name(),
1086  numGroups,
1087  numFilters,
1088  CHECK_LOCATION().AsString()));
1089  }
1090 
1091  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
1092 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void AddConvLayerWithDepthwiseConv(const caffe::LayerParameter &layerParam, const armnn::Convolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationY
Dilation along y axis.
void AddConvLayerWithSplits(const caffe::LayerParameter &layerParam, const armnn::Convolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
ParseConv may use these helpers depending on the group parameter.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
uint32_t m_DilationX
Dilation along x axis.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
#define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, PARAM_TYPE, OPTIONAL_VALUE, FALLBACK_VECTOR, VALUE_TYPE, DEFAULT_VALUE)
virtual int Connect(IInputSlot &destination)=0
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.
uint32_t m_PadLeft
Padding left value in the width dimension.
BlobShape TensorDescToBlobShape(const TensorInfo &desc)

◆ ParseDeconvLayer()

void ParseDeconvLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1094 of file CaffeParser.cpp.

References ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(), ARMNN_ASSERT, ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(), CHECK_LOCATION, IOutputSlot::Connect(), GET_OPTIONAL_WITH_VECTOR_FALLBACK, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), armnnUtils::GetTensorInfo(), ICaffeParser::CaffeParserImpl::m_Network, TransposeConvolution2dDescriptor::m_PadLeft, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and armnnCaffeParser::TensorDescToBlobShape().

1095 {
1096  // Ignored Caffe Parameters
1097  // * Weight Filler
1098  // * Bias Filler
1099  // * Engine
1100  // * Force nd_im2col
1101  // * Axis
1102 
1103  // Not Available ArmNN Interface Parameters
1104  // * Rounding policy;
1105 
1106  ARMNN_ASSERT(layerParam.type() == "Deconvolution");
1107  ValidateNumInputsOutputs(layerParam, 1, 1);
1108 
1109  ConvolutionParameter convParam = layerParam.convolution_param();
1110  BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
1111  const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
1112  unsigned int numFilters = convParam.num_output();
1113 
1114  const auto notFound = std::numeric_limits<unsigned int>::max();
1115 
1116  unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
1117  kernel_h, kernel_size, unsigned int, notFound);
1118  unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
1119  kernel_w, kernel_size, unsigned int, notFound);
1120 
1121  unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
1122  stride_h, stride, unsigned int, 1u);
1123  unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
1124  stride_w, stride, unsigned int, 1u);
1125 
1126  unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
1127  pad_h, pad, unsigned int, 0u);
1128  unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
1129  pad_w, pad, unsigned int, 0u);
1130 
1131  unsigned int dilationH = convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
1132  unsigned int dilationW = convParam.dilation_size() > 1 ? convParam.dilation(1) :
1133  convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
1134 
1135  if (dilationH != 1 || dilationW != 1) {
1136  fmt::format("Dilated decnvolution is not supported. "
1137  "{}'s input has dilation {} {}. {}",
1138  layerParam.name(),
1139  dilationW, dilationH,
1140  CHECK_LOCATION().AsString());
1141  }
1142 
1143  TransposeConvolution2dDescriptor deconvolution2dDescriptor;
1144  deconvolution2dDescriptor.m_PadLeft = padW;
1145  deconvolution2dDescriptor.m_PadRight = padW;
1146  deconvolution2dDescriptor.m_PadTop = padH;
1147  deconvolution2dDescriptor.m_PadBottom = padH;
1148  deconvolution2dDescriptor.m_StrideX = strideW;
1149  deconvolution2dDescriptor.m_StrideY = strideH;
1150  deconvolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;
1151 
1152  if (numGroups > numFilters)
1153  {
1154  throw ParseException(
1155  fmt::format("Error parsing Deconvolution: {}. "
1156  "The 'group'={} parameter cannot be larger than the "
1157  "number of filters supplied ='{}'. {}",
1158  layerParam.name(),
1159  numGroups,
1160  numFilters,
1161  CHECK_LOCATION().AsString()));
1162  }
1163 
1164  if (inputShape.dim_size() != 4)
1165  {
1166  throw ParseException(
1167  fmt::format("Deconvolution input shape is expected to have 4 dimensions. "
1168  "{}'s input has only {}. {}",
1169  layerParam.name(),
1170  inputShape.dim_size(),
1171  CHECK_LOCATION().AsString()));
1172  }
1173 
1174  if (numGroups > 1)
1175  {
1176  if (numGroups > inputShape.dim(1))
1177  {
1178  throw ParseException(
1179  fmt::format("Error parsing Deconvolution: {}. "
1180  "The 'group'={} parameter cannot be larger than the "
1181  "channel of the input shape={} (in NCHW format). {}",
1182  layerParam.name(),
1183  numGroups,
1184  inputShape.dim(1),
1185  CHECK_LOCATION().AsString()));
1186  }
1187  else
1188  {
1189  // we split the input by channels into channels/groups separate convolutions
1190  // and concatenate the results afterwards
1191  AddDeconvLayerWithSplits(layerParam, deconvolution2dDescriptor, kernelW, kernelH);
1192  return;
1193  }
1194  }
1195 
1196  // NOTE: at this point we only need to handle #group=1 case, all other cases should be
1197  // handled by the AddDeconvLayer* helpers
1198 
1199  // Populate deconvolution output tensor descriptor dimensions
1200  BlobShape outputShape;
1201  outputShape.add_dim(0);
1202  outputShape.set_dim(0, inputShape.dim(0));
1203  outputShape.add_dim(1);
1204  outputShape.set_dim(1, numFilters);
1205  outputShape.add_dim(2);
1206  outputShape.set_dim(
1207  2, (static_cast<int>(
1208  strideH * (inputShape.dim(2) - 1) - 2 * padH + (dilationH * (kernelH - 1) + 1))));
1209  outputShape.add_dim(3);
1210  outputShape.set_dim(
1211  3, (static_cast<int>(
1212  strideW * (inputShape.dim(3) - 1) - 2 * padW + (dilationW * (kernelW - 1) + 1))));
1213 
1214  // Load the weight data for ALL groups
1215  vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
1216  outputShape.dim(1) *
1217  kernelH *
1218  kernelW));
1219  GetDataFromBlob(layerParam, weightData, 0);
1220 
1221  const unsigned int weightDimSizes[4] = {
1222  static_cast<unsigned int>(outputShape.dim(1)), // output channels
1223  static_cast<unsigned int>(inputShape.dim(1)), // input channels
1224  kernelH,
1225  kernelW};
1226 
1227  armnn::IConnectableLayer* returnLayer = nullptr;
1228 
1229  // Pull out the weights for this group from that loaded from the model file earlier
1230  ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
1231  Optional<ConstTensor> optionalBiases;
1232  vector<float> biasData;
1233  if (deconvolution2dDescriptor.m_BiasEnabled)
1234  {
1235  TensorInfo biasInfo;
1236 
1237  biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
1238  GetDataFromBlob(layerParam, biasData, 1);
1239 
1240  const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
1241  biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
1242 
1243  // Pull out the biases for this group from that loaded from the model file earlier
1244  ConstTensor biases(biasInfo, biasData.data());
1245  optionalBiases = Optional<ConstTensor>(biases);
1246  }
1247  returnLayer = m_Network->AddTransposeConvolution2dLayer(deconvolution2dDescriptor,
1248  weights,
1249  optionalBiases,
1250  layerParam.name().c_str());
1251 
1252  armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
1253  inputConnection.Connect(returnLayer->GetInputSlot(0));
1254  returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
1255 
1256  if (!returnLayer)
1257  {
1258  throw ParseException(
1259  fmt::format("Failed to create Deconvolution layer. "
1260  "Layer={} #groups={} #filters={} {}",
1261  layerParam.name(),
1262  numGroups,
1263  numFilters,
1264  CHECK_LOCATION().AsString()));
1265  }
1266 
1267  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
1268 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
void AddDeconvLayerWithSplits(const caffe::LayerParameter &layerParam, const armnn::TransposeConvolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
#define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, PARAM_TYPE, OPTIONAL_VALUE, FALLBACK_VECTOR, VALUE_TYPE, DEFAULT_VALUE)
virtual int Connect(IInputSlot &destination)=0
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.
BlobShape TensorDescToBlobShape(const TensorInfo &desc)

◆ ParseDropoutLayer()

void ParseDropoutLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1900 of file CaffeParser.cpp.

References CHECK_LOCATION, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), and ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop().

1901 {
1902  // Ignored for inference, so patch the single input to its single output.
1903  if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
1904  {
1905  throw ParseException(
1906  fmt::format("Dropout layer '{}' should have exactly 1 bottom and 1 top. "
1907  "#bottoms={} #tops={} {}",
1908  layerParam.name(),
1909  layerParam.bottom_size(),
1910  layerParam.top_size(),
1911  CHECK_LOCATION().AsString()));
1912  }
1913  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
1914 }
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)

◆ ParseEltwiseLayer()

void ParseEltwiseLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1663 of file CaffeParser.cpp.

References CHECK_LOCATION, Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1664 {
1665  ValidateNumInputsOutputs(layerParam, 2, 1);
1666 
1667  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1668 
1669  // Ignored Caffe Parameters:
1670  // coeff
1671 
1672  EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM; // Defaults to sum as per caffe.
1673 
1674  if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation())
1675  {
1676  operation = layerParam.eltwise_param().operation();
1677  }
1678 
1679  armnn::IConnectableLayer* newLayer = nullptr;
1680  switch (operation)
1681  {
1682  case EltwiseParameter_EltwiseOp_SUM:
1683  {
1684  newLayer = m_Network->AddAdditionLayer(layerParam.name().c_str());
1685  break;
1686  }
1687  case EltwiseParameter_EltwiseOp_PROD:
1688  {
1689  newLayer = m_Network->AddMultiplicationLayer(layerParam.name().c_str());
1690  break;
1691  }
1692  default:
1693  {
1694  throw ParseException(
1695  fmt::format("Unsupported operation {} in Eltwise layer {} {}",
1696  operation,
1697  layerParam.name(),
1698  CHECK_LOCATION().AsString()));
1699  }
1700  }
1701 
1702  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(newLayer->GetInputSlot(0));
1703  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(1)).Connect(newLayer->GetInputSlot(1));
1704  newLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1705  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), newLayer->GetOutputSlot(0));
1706 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12

◆ ParseInnerProductLayer()

void ParseInnerProductLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1567 of file CaffeParser.cpp.

References Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), FullyConnectedDescriptor::m_BiasEnabled, ICaffeParser::CaffeParserImpl::m_Network, FullyConnectedDescriptor::m_TransposeWeightMatrix, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1568 {
1569  InnerProductParameter param = layerParam.inner_product_param();
1570 
1571  ValidateNumInputsOutputs(layerParam, 1, 1);
1572 
1573  unsigned int outputSize = param.num_output();
1574 
1575  // Ignored Caffe Parameters:
1576  // Weight Filler
1577  // Bias Filler
1578  // Engine
1579  // Axis
1580 
1581  FullyConnectedDescriptor tensorFullyConnectedDescriptor;
1582 
1583  if (param.has_transpose())
1584  {
1585  // If true, assumes transposed weights.
1586  tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = param.transpose();
1587  }
1588  else
1589  {
1590  // Caffe defaults to transposed.
1591  tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = true;
1592  }
1593 
1594  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1595 
1596  TensorInfo weightInfo;
1597  TensorInfo biasInfo;
1598 
1599  // Allows implicit flattening of extra dimensions.
1600  unsigned int inputSize = inputInfo.GetShape()[1];
1601  for (unsigned int i = 2; i < inputInfo.GetNumDimensions(); ++i)
1602  {
1603  inputSize *= inputInfo.GetShape()[i];
1604  }
1605 
1606  const float* weightDataPtr = GetArrayPtrFromBlob(layerParam, 0);
1607  const unsigned int swTD[2] = { outputSize, inputSize };
1608  ConstTensor weights(TensorInfo(2, swTD, DataType::Float32), weightDataPtr);
1609 
1610  tensorFullyConnectedDescriptor.m_BiasEnabled = true;
1611  // Todo: check whether bias enabled.
1612  armnn::IConnectableLayer* fullyConnectedLayer = nullptr;
1613  if (tensorFullyConnectedDescriptor.m_BiasEnabled)
1614  {
1615  // BIAS VALUE
1616  const float* biasDataPtr = GetArrayPtrFromBlob(layerParam, 1);
1617 
1618  const unsigned int sbTD[1] = { outputSize };
1619 
1620  ConstTensor biases(TensorInfo(1, sbTD, DataType::Float32), biasDataPtr);
1621 
1622  fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
1623  weights,
1624  Optional<ConstTensor>(biases),
1625  layerParam.name().c_str());
1626  }
1627  else
1628  {
1629  fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
1630  weights,
1631  EmptyOptional(),
1632  layerParam.name().c_str());
1633  }
1634 
1635  TensorInfo outputInfo({ inputInfo.GetShape()[0], outputSize }, DataType::Float32);
1636  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(fullyConnectedLayer->GetInputSlot(0));
1637  fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1638  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), fullyConnectedLayer->GetOutputSlot(0));
1639 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

◆ ParseInputLayer()

void ParseInputLayer ( const caffe::LayerParameter &  layerParam)
protected

Adds an armnn layer to m_Network given a Caffe LayerParameter of the correct type and is responsible for recording any newly created IOutputSlots using SetArmnnOutputSlotForCaffeTop().

Definition at line 397 of file CaffeParser.cpp.

References ARMNN_ASSERT, ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(), CHECK_LOCATION, ICaffeParser::CaffeParserImpl::m_InputShapes, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::m_NetworkInputsBindingInfo, armnn::numeric_cast(), ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), TensorInfo::SetShape(), and ICaffeParser::CaffeParserImpl::TrackInputBinding().

398 {
399  ARMNN_ASSERT(layerParam.type() == "Input");
400  ValidateNumInputsOutputs(layerParam, 0, 1);
401 
402  const InputParameter& param = layerParam.input_param();
403 
406  armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str());
407 
408  // Decides the tensor info for this input. This can be specified in the Caffe network but can also
409  // be overriden by user input (m_inputShapes).
410  armnn::TensorInfo inputTensorInfo;
411 
412  const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ?
413  &param.shape(0) : nullptr;
414  if (originalShape)
415  {
416  inputTensorInfo = BlobShapeToTensorInfo(*originalShape);
417  }
418 
419  auto overrideIt = m_InputShapes.find(layerParam.name());
420  if (overrideIt != m_InputShapes.end())
421  {
422  const TensorShape& overrideShape = overrideIt->second;
423  if (originalShape &&
424  ( originalShape->dim(1) != overrideShape[1]
425  || originalShape->dim(2) != overrideShape[2]
426  || originalShape->dim(3) != overrideShape[3]))
427  {
428  throw ParseException(
429  fmt::format("Parsed input shape for '{}' is incompatible with the override provided. {}",
430  layerParam.name(),
431  CHECK_LOCATION().AsString()));
432  }
433  inputTensorInfo.SetShape(overrideShape);
434  }
435  else if (!originalShape)
436  {
437  throw ParseException(
438  fmt::format("No input descriptor given for '{}' and no input shape found in caffe model. {}",
439  layerParam.name(),
440  CHECK_LOCATION().AsString()));
441  }
442  TrackInputBinding(inputLayer, inputId, inputTensorInfo);
443  inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
444  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), inputLayer->GetOutputSlot(0));
445 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::map< std::string, armnn::TensorShape > m_InputShapes
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
maps input layer names to their corresponding ids and tensor infos
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
void TrackInputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.

◆ ParseLRNLayer()

void ParseLRNLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1468 of file CaffeParser.cpp.

References CHECK_LOCATION, Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), NormalizationDescriptor::m_Alpha, NormalizationDescriptor::m_Beta, NormalizationDescriptor::m_K, ICaffeParser::CaffeParserImpl::m_Network, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, armnn::numeric_cast(), ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1469 {
1470  ValidateNumInputsOutputs(layerParam, 1, 1);
1471 
1472  LRNParameter param = layerParam.lrn_param();
1473 
1474  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1475 
1476  // Ignored BATCH NORMALIZATION Caffe Parameters.
1477  // Ignored MVN Caffe Parameters.
1478  // Ignored LRN Caffe Parameters.
1479  // Engine
1480 
1481  NormalizationDescriptor normalizationDescriptor;
1482  if (param.has_norm_region())
1483  {
1484  LRNParameter_NormRegion n = param.norm_region();
1485  switch (n)
1486  {
1487  case LRNParameter_NormRegion_ACROSS_CHANNELS:
1488  {
1489  normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
1490  break;
1491  }
1492  case LRNParameter_NormRegion_WITHIN_CHANNEL:
1493  {
1494  normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
1495  break;
1496  }
1497  default:
1498  {
1499  throw ParseException(
1500  fmt::format("Unknown region {} for LRN layer {} {}",
1501  n,
1502  layerParam.name(),
1503  CHECK_LOCATION().AsString()));
1504  }
1505  }
1506  }
1507  else
1508  {
1509  // Caffe defaults to normalization across channels.
1510  normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
1511  }
1512 
1513  normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
1514  if (param.has_local_size())
1515  {
1516  normalizationDescriptor.m_NormSize = param.local_size();
1517  }
1518  else
1519  {
1520  throw ParseException(
1521  fmt::format("local_size not defined for LRN layer {} {}",
1522  layerParam.name(),
1523  CHECK_LOCATION().AsString()));
1524  }
1525 
1526  if (param.has_alpha())
1527  {
1528  normalizationDescriptor.m_Alpha = param.alpha();
1529  normalizationDescriptor.m_Alpha /= armnn::numeric_cast<float>(param.local_size());
1530  }
1531  else
1532  {
1533  throw ParseException(
1534  fmt::format("Alpha not defined for LRN layer {} {}",
1535  layerParam.name(),
1536  CHECK_LOCATION().AsString()));
1537  }
1538  if (param.has_beta())
1539  {
1540  normalizationDescriptor.m_Beta = param.beta();
1541  }
1542  else
1543  {
1544  throw ParseException(
1545  fmt::format("Beta not defined for LRN layer {} {}",
1546  layerParam.name(),
1547  CHECK_LOCATION().AsString()));
1548  }
1549 
1550  if (param.has_k())
1551  {
1552  normalizationDescriptor.m_K = param.k();
1553  }
1554  else
1555  {
1556  normalizationDescriptor.m_K = 1;
1557  }
1558 
1559  IConnectableLayer* const normLayer = m_Network->AddNormalizationLayer(normalizationDescriptor,
1560  layerParam.name().c_str());
1561  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(normLayer->GetInputSlot(0));
1562  normLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1563 
1564  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), normLayer->GetOutputSlot(0));
1565 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
float m_K
Kappa value used for the across channel normalization equation.
float m_Alpha
Alpha value for the normalization equation.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
A NormalizationDescriptor for the NormalizationLayer.
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.

◆ ParsePoolingLayer()

void ParsePoolingLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1270 of file CaffeParser.cpp.

References CHECK_LOCATION, Connect(), GET_OPTIONAL_WITH_FALLBACK, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), ICaffeParser::CaffeParserImpl::m_Network, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::Max, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1271 {
1272  // Ignored Caffe Parameters
1273  // Stochastic Pooling
1274  // Engine
1275 
1276  ValidateNumInputsOutputs(layerParam, 1, 1);
1277  PoolingParameter param = layerParam.pooling_param();
1278  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1279 
1280  const auto notFound = std::numeric_limits<unsigned int>::max();
1281 
1282  unsigned int kernel_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
1283  kernel_h, kernel_size, unsigned int, notFound);
1284  unsigned int kernel_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
1285  kernel_w, kernel_size, unsigned int, notFound);
1286 
1287  if ((kernel_h == notFound || kernel_w == notFound) && param.has_global_pooling())
1288  {
1289  kernel_h = inputInfo.GetShape()[2];
1290  kernel_w = inputInfo.GetShape()[3];
1291  }
1292 
1293  unsigned int stride_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
1294  stride_h, stride, unsigned int, notFound);
1295  unsigned int stride_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
1296  stride_h, stride, unsigned int, notFound);
1297 
1298  if ((stride_h == notFound || stride_w == notFound) && param.has_global_pooling())
1299  {
1300  stride_h = 1;
1301  stride_w = 1;
1302  }
1303 
1304  unsigned int pad_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
1305  pad_h, pad, unsigned int, 0u);
1306  unsigned int pad_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
1307  pad_w, pad, unsigned int, 0u);
1308 
1309  // Populate Weight and Bias Filter Descriptor
1310  Pooling2dDescriptor pooling2dDescriptor;
1311  if (param.has_pool())
1312  {
1313  PoolingParameter_PoolMethod p = param.pool();
1314  switch (p)
1315  {
1316  case PoolingParameter_PoolMethod_MAX:
1317  {
1318  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
1319  break;
1320  }
1321  case PoolingParameter_PoolMethod_AVE:
1322  {
1323  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
1324  break;
1325  }
1326  case PoolingParameter_PoolMethod_STOCHASTIC:
1327  {
1328  throw ParseException(
1329  fmt::format("Pooling Layer: Stochastic Pooling Not Supported. Layer={} {}",
1330  layerParam.name(),
1331  CHECK_LOCATION().AsString()));
1332  }
1333  default:
1334  {
1335  throw ParseException(
1336  fmt::format("Pooling Layer: unknown pooling method: {} for layer: {} {}",
1337  p,
1338  layerParam.name(),
1339  CHECK_LOCATION().AsString()));
1340  }
1341  }
1342  }
1343  else
1344  {
1345  throw ParseException(
1346  fmt::format("No Pooling Method Defined for {} {}",
1347  layerParam.name(),
1348  CHECK_LOCATION().AsString()));
1349  }
1350 
1351  pooling2dDescriptor.m_PadLeft = pad_w;
1352  pooling2dDescriptor.m_PadRight = pad_w;
1353  pooling2dDescriptor.m_PadTop = pad_h;
1354  pooling2dDescriptor.m_PadBottom = pad_h;
1355  pooling2dDescriptor.m_StrideX = stride_w;
1356  pooling2dDescriptor.m_StrideY = stride_h;
1357  pooling2dDescriptor.m_PoolWidth = kernel_w;
1358  pooling2dDescriptor.m_PoolHeight = kernel_h;
1359 
1360  pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling;
1361  pooling2dDescriptor.m_PaddingMethod = PaddingMethod::IgnoreValue;
1362 
1363  armnn::IConnectableLayer* poolingLayer = m_Network->AddPooling2dLayer(pooling2dDescriptor,
1364  layerParam.name().c_str());
1365 
1366  TensorInfo outputInfo(
1367  { inputInfo.GetShape()[0],
1368  inputInfo.GetShape()[1],
1369  static_cast<unsigned int>(ceil(
1370  static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) /
1371  armnn::numeric_cast<float>(stride_h))) + 1,
1372  static_cast<unsigned int>(ceil(
1373  static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) /
1374  armnn::numeric_cast<float>(stride_w))) + 1 },
1375  DataType::Float32);
1376 
1377  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(poolingLayer->GetInputSlot(0));
1378  poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1379  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), poolingLayer->GetOutputSlot(0));
1380 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_PoolWidth
Pooling width value.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
uint32_t m_PoolHeight
Pooling height value.
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
uint32_t m_PadRight
Padding right value in the width dimension.
#define GET_OPTIONAL_WITH_FALLBACK(PARAM, PARAM_TYPE, OPTIONAL_VALUE, FALLBACK_VALUE, VALUE_TYPE, DEFAULT_VALUE)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ ParseReluLayer()

void ParseReluLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1442 of file CaffeParser.cpp.

References Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), ActivationDescriptor::m_A, ActivationDescriptor::m_Function, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1443 {
1444  ValidateNumInputsOutputs(layerParam, 1, 1);
1445 
1446  const string& name = layerParam.name();
1447  const ReLUParameter& param = layerParam.relu_param();
1448 
1449  ActivationDescriptor activationDescriptor;
1450  const float negativeSlope = param.negative_slope();
1451  if (negativeSlope == 0.0f)
1452  {
1453  activationDescriptor.m_Function = ActivationFunction::ReLu;
1454  }
1455  else
1456  {
1457  activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
1458  activationDescriptor.m_A = negativeSlope;
1459  }
1460 
1461  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1462  IConnectableLayer* const activationLayer = m_Network->AddActivationLayer(activationDescriptor, name.c_str());
1463  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(activationLayer->GetInputSlot(0));
1464  activationLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1465  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), activationLayer->GetOutputSlot(0));
1466 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ParseScaleLayer()

void ParseScaleLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1831 of file CaffeParser.cpp.

References CHECK_LOCATION, Connect(), armnn::Float32, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), BatchNormalizationDescriptor::m_Eps, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1832 {
1833  // Current unoptimal solution: add a batchnormalization layer with 0 mean and 1 variance.
1834  ValidateNumInputsOutputs(layerParam, 1, 1);
1835 
1836  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1837 
1838  string name = layerParam.name();
1839 
1840  ScaleParameter param = layerParam.scale_param();
1841  if (param.axis() != 1)
1842  {
1843  // Would have to use something other than BatchNormalizationLayer in this case
1844  throw ParseException(
1845  fmt::format("Loading Scale Layer: Only axis 1 is supported currently. "
1846  "Layer={} Axis={} {}",
1847  layerParam.name(),
1848  param.axis(),
1849  CHECK_LOCATION().AsString()));
1850  }
1851 
1852  unsigned int channels = inputInfo.GetShape()[1];
1853  unsigned int shape[] = {channels};
1854 
1856  desc.m_Eps = 0.0f; // Don't need epsilon if variance is 1.
1857  vector<float> meanData(channels, 0.0f);
1858  vector<float> varianceData(channels, 1.0f);
1859  vector<float> betaData(channels, 0.0f);
1860  vector<float> gammaData(channels);
1861 
1862  GetDataFromBlob(layerParam, gammaData, 0);
1863 
1864  if(param.has_bias_term())
1865  {
1866  GetDataFromBlob(layerParam, betaData, 1);
1867  }
1868 
1869  ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
1870  ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
1871  ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
1872  ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);
1873 
1874  armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
1875  mean, variance, beta, gamma, name.c_str());
1876  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
1877  batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1878  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
1879 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
A BatchNormalizationDescriptor for the BatchNormalizationLayer.

◆ ParseSoftmaxLayer()

void ParseSoftmaxLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1641 of file CaffeParser.cpp.

References Connect(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnnUtils::GetTensorInfo(), SoftmaxDescriptor::m_Axis, ICaffeParser::CaffeParserImpl::m_Network, ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(), and IOutputSlot::SetTensorInfo().

1642 {
1643  ValidateNumInputsOutputs(layerParam, 1, 1);
1644 
1645  SoftmaxParameter param = layerParam.softmax_param();
1646 
1647  const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1648 
1649  // Ignored Caffe Parameters:
1650  // axis
1651  // Engine
1652 
1653  armnn::SoftmaxDescriptor softmaxDescriptor;
1654  softmaxDescriptor.m_Axis = 1;
1655  armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(
1656  softmaxDescriptor,
1657  layerParam.name().c_str());
1658  GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(softmaxLayer->GetInputSlot(0));
1659  softmaxLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1660  SetArmnnOutputSlotForCaffeTop(layerParam.top(0), softmaxLayer->GetOutputSlot(0));
1661 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
A SoftmaxDescriptor for the SoftmaxLayer.

◆ ParseSplitLayer()

void ParseSplitLayer ( const caffe::LayerParameter &  layerParam)
protected

Definition at line 1881 of file CaffeParser.cpp.

References CHECK_LOCATION, ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), and ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop().

1882 {
1883  // Used in caffe to duplicate memory - not necessary in armnn.
1884  if (layerParam.bottom_size() != 1)
1885  {
1886  throw ParseException(
1887  fmt::format("Split layer '{}' should have exactly 1 bottom. "
1888  "#bottoms={} {}",
1889  layerParam.name(),
1890  layerParam.bottom_size(),
1891  CHECK_LOCATION().AsString()));
1892  }
1893  armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
1894  for (int i = 0; i < layerParam.top_size(); i++)
1895  {
1896  SetArmnnOutputSlotForCaffeTop(layerParam.top(i), outputSlot);
1897  }
1898 }
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
An output connection slot for a layer.
Definition: INetwork.hpp:38
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)

◆ ResolveInPlaceLayers()

void ResolveInPlaceLayers ( caffe::NetParameter &  netParameter)
protected

Modifies the Caffe network to replace "in-place" layers (whose top() and bottom() are both the same) with regular layers.

This simplifies further parsing.

Definition at line 1987 of file CaffeParser.cpp.

References CHECK_LOCATION.

Referenced by RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(), and ICaffeParser::CaffeParserImpl::LoadNetParam().

1988 {
1989  // Finds layers with the same top.
1990  std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
1991  for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
1992  {
1993  caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx);
1994  std::string name = layer.name();
1995  for (int i = 0; i < layer.top_size(); ++i)
1996  {
1997  layersByTop[layer.top(i)].push_back(&layer);
1998  }
1999  }
2000 
2001  // For each set of layers with the same top, resolves them to a linear chain rather than in-place layers.
2002  // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op.
2003  for (auto layersWithSameTopIt : layersByTop)
2004  {
2005  const std::string& top = layersWithSameTopIt.first;
2006  const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second;
2007 
2008  // Chains the layers together in the order that they are listed in the prototxt (hopefully this is correct).
2009  // Note that the last layer will not have its top modified so that other layers will continue to reference it.
2010  for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
2011  {
2012  caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx];
2013  caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx+1];
2014  if (layer1.top_size() != 1)
2015  {
2016  throw ParseException(
2017  fmt::format("Node '{}' is an in-place layer but doesn't have exactly one "
2018  "top. It has {} instead. {}",
2019  layer1.name(),
2020  layer1.top_size(),
2021  CHECK_LOCATION().AsString()));
2022  }
2023  std::string newTop = layer1.name() + "_top";
2024  layer1.set_top(0, newTop);
2025  if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
2026  {
2027  throw ParseException(
2028  fmt::format("Node '{}' is an in-place layer but "
2029  "doesn't have exactly one bottom, or it doesn't match its top. "
2030  "#bottoms={}, first bottom is {}, top is {} {}",
2031  layer2.name(),
2032  layer2.bottom(0),
2033  top,
2034  CHECK_LOCATION().AsString()));
2035  }
2036  layer2.set_bottom(0, newTop);
2037  }
2038  }
2039 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ SetArmnnOutputSlotForCaffeTop()

void SetArmnnOutputSlotForCaffeTop ( const std::string &  caffeTopName,
armnn::IOutputSlot armnnOutputSlot 
)
protected

Definition at line 1968 of file CaffeParser.cpp.

References CHECK_LOCATION, and ICaffeParser::CaffeParserImpl::m_ArmnnOutputSlotForCaffeTop.

Referenced by ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(), ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(), ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(), ICaffeParser::CaffeParserImpl::ParseArgmaxLayer(), ICaffeParser::CaffeParserImpl::ParseBatchNormLayer(), ICaffeParser::CaffeParserImpl::ParseConcatLayer(), ICaffeParser::CaffeParserImpl::ParseConvLayer(), ICaffeParser::CaffeParserImpl::ParseDeconvLayer(), ICaffeParser::CaffeParserImpl::ParseDropoutLayer(), ICaffeParser::CaffeParserImpl::ParseEltwiseLayer(), ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(), ICaffeParser::CaffeParserImpl::ParseInputLayer(), ICaffeParser::CaffeParserImpl::ParseLRNLayer(), ICaffeParser::CaffeParserImpl::ParsePoolingLayer(), ICaffeParser::CaffeParserImpl::ParseReluLayer(), ICaffeParser::CaffeParserImpl::ParseScaleLayer(), ICaffeParser::CaffeParserImpl::ParseSoftmaxLayer(), and ICaffeParser::CaffeParserImpl::ParseSplitLayer().

1970 {
1971  auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
1972  if (it == m_ArmnnOutputSlotForCaffeTop.end())
1973  {
1974  m_ArmnnOutputSlotForCaffeTop[caffeTopName] = &armnnOutputSlot;
1975  }
1976  else
1977  {
1978  throw ParseException(
1979  fmt::format("Attempting to add duplicate entry for Caffe top '{}' {}",
1980  caffeTopName,
1981  CHECK_LOCATION().AsString()));
1982  }
1983 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::unordered_map< std::string, armnn::IOutputSlot * > m_ArmnnOutputSlotForCaffeTop
As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops...

◆ TrackBindingPoint()

void TrackBindingPoint ( armnn::IConnectableLayer layer,
armnn::LayerBindingId  id,
const armnn::TensorInfo tensorInfo,
const char *  bindingPointDesc,
std::unordered_map< std::string, BindingPointInfo > &  nameToBindingInfo 
)
staticprotected

Definition at line 1930 of file CaffeParser.cpp.

References CHECK_LOCATION, and IConnectableLayer::GetName().

Referenced by ICaffeParser::CaffeParserImpl::TrackInputBinding(), and ICaffeParser::CaffeParserImpl::TrackOutputBinding().

1935 {
1936  const std::string layerName = layer->GetName();
1937  auto it = nameToBindingInfo.find(layerName);
1938  if (it == nameToBindingInfo.end())
1939  {
1940  nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
1941  }
1942  else
1943  {
1944  throw ParseException(
1945  fmt::format("Id {} used by more than one {} layer {}",
1946  id,
1947  bindingPointDesc,
1948  CHECK_LOCATION().AsString()));
1949  }
1950 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
virtual const char * GetName() const =0
Returns the name of the layer.

◆ TrackInputBinding()

void TrackInputBinding ( armnn::IConnectableLayer layer,
armnn::LayerBindingId  id,
const armnn::TensorInfo tensorInfo 
)
protected

Definition at line 1916 of file CaffeParser.cpp.

References IConnectableLayer::GetName(), ICaffeParser::CaffeParserImpl::m_NetworkInputsBindingInfo, and ICaffeParser::CaffeParserImpl::TrackBindingPoint().

Referenced by ICaffeParser::CaffeParserImpl::ParseInputLayer().

1919 {
1920  return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkInputsBindingInfo);
1921 }
static void TrackBindingPoint(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
maps input layer names to their corresponding ids and tensor infos
virtual const char * GetName() const =0
Returns the name of the layer.

◆ TrackOutputBinding()

void TrackOutputBinding ( armnn::IConnectableLayer layer,
armnn::LayerBindingId  id,
const armnn::TensorInfo tensorInfo 
)
protected

Definition at line 1923 of file CaffeParser.cpp.

References IConnectableLayer::GetName(), ICaffeParser::CaffeParserImpl::m_NetworkOutputsBindingInfo, and ICaffeParser::CaffeParserImpl::TrackBindingPoint().

Referenced by RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(), and ICaffeParser::CaffeParserImpl::LoadNetParam().

1926 {
1927  return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkOutputsBindingInfo);
1928 }
static void TrackBindingPoint(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
maps output layer names to their corresponding ids and tensor infos
virtual const char * GetName() const =0
Returns the name of the layer.

Member Data Documentation

◆ m_ArmnnOutputSlotForCaffeTop

std::unordered_map<std::string, armnn::IOutputSlot*> m_ArmnnOutputSlotForCaffeTop
protected

As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops.

Definition at line 147 of file CaffeParser.hpp.

Referenced by ICaffeParser::CaffeParserImpl::Cleanup(), ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(), and ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop().

◆ m_CaffeLayersByTopName

std::map<std::string, const caffe::LayerParameter*> m_CaffeLayersByTopName
protected

◆ m_InputShapes

◆ m_Network

◆ m_NetworkInputsBindingInfo

◆ m_NetworkOutputsBindingInfo

◆ m_RequestedOutputs

◆ ms_CaffeLayerNameToParsingFunctions

const std::map< std::string, ICaffeParser::CaffeParserImpl::OperationParsingFunction > ms_CaffeLayerNameToParsingFunctions
staticprotected

The documentation for this class was generated from the following files: