19 #include <google/protobuf/io/zero_copy_stream_impl.h> 20 #include <google/protobuf/text_format.h> 22 #include <tensorflow/core/framework/graph.pb.h> 24 #include <boost/format.hpp> 25 #include <boost/format.hpp> 26 #include <boost/numeric/conversion/cast.hpp> 27 #include <boost/polymorphic_cast.hpp> 32 using namespace armnn;
43 template <
typename Callable>
44 void ReadMandatoryNodeAttributeImpl(
const tensorflow::NodeDef& nodeDef,
45 const std::string& attribName,
46 tensorflow::AttrValue::ValueCase expectedValueCase,
49 auto iter = nodeDef.attr().find(attribName);
50 if (iter != nodeDef.attr().end())
52 const auto& attrValue = iter->second;
53 if (attrValue.value_case() == expectedValueCase)
62 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, " 63 "but found %4% instead %5%")
66 %
static_cast<int>(expectedValueCase)
67 % static_cast<int>(attrValue.value_case())
76 "Could not find required attribute %1% in node %2% %3%")
83 template <
typename Callable>
84 void ReadOptionalNodeAttributeImpl(
const tensorflow::NodeDef& nodeDef,
85 const std::string& attribName,
86 tensorflow::AttrValue::ValueCase expectedValueCase,
89 auto iter = nodeDef.attr().find(attribName);
90 if (iter != nodeDef.attr().end())
92 const auto& attrValue = iter->second;
93 if (attrValue.value_case() == expectedValueCase)
102 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, " 103 "but found %4% instead %5%")
106 %
static_cast<int>(expectedValueCase)
107 % static_cast<int>(attrValue.value_case())
113 float ReadMandatoryNodeFloatAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
115 float attribValue = 0.0f;
116 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
117 [&attribValue](
const tensorflow::AttrValue& attrValue)
119 attribValue = attrValue.f();
124 int32_t ReadMandatoryNodeInt32Attribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
126 int32_t attribValue = 0u;
127 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
128 [&attribValue](
const tensorflow::AttrValue& attrValue)
130 attribValue =
static_cast<int32_t
>(attrValue.i());
135 bool ReadMandatoryNodeBoolAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
137 bool attribValue =
false;
138 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
139 [&attribValue](
const tensorflow::AttrValue& attrValue)
141 attribValue =
static_cast<bool>(attrValue.b());
146 uint32_t ReadMandatoryNodeUint32Attribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
148 uint32_t attribValue = 0u;
149 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
150 [&attribValue](
const tensorflow::AttrValue& attrValue)
152 attribValue =
static_cast<uint32_t
>(attrValue.i());
157 std::string ReadMandatoryNodeStringAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
159 std::string attribValue =
"";
160 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
161 [&attribValue](
const tensorflow::AttrValue& attrValue)
163 attribValue = attrValue.s();
168 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(
const tensorflow::NodeDef& nodeDef,
169 const std::string& name)
171 std::vector<uint32_t> attriList;
172 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
173 [&attriList](
const tensorflow::AttrValue& attrValue)
175 for (
int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
177 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
184 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(
const tensorflow::NodeDef& nodeDef,
185 const std::string& name)
187 std::vector<uint32_t> attriList;
188 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
189 [&attriList](
const tensorflow::AttrValue& attrValue)
191 for (
int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
193 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
200 std::string ReadOptionalNodeStringAttribute(
const tensorflow::NodeDef& nodeDef,
201 const std::string& name,
202 const std::string& defaultValue =
"")
204 std::string attribValue = defaultValue;
205 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
206 [&attribValue](
const tensorflow::AttrValue& attrValue)
208 attribValue = attrValue.s();
213 bool ReadOptionalNodeBoolAttribute(
const tensorflow::NodeDef& nodeDef,
214 const std::string& name,
215 bool defaultValue =
false)
217 bool attribValue = defaultValue;
218 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
219 [&attribValue](
const tensorflow::AttrValue& attrValue)
221 attribValue = attrValue.b();
226 tensorflow::DataType ReadMandatoryNodeTypeAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
229 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
230 [&attribValue](
const tensorflow::AttrValue& attrValue)
232 attribValue = attrValue.type();
239 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
240 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
242 if (stretchDim != targetDims.end())
244 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
249 "At most one component of shape can be -1 %1%")
253 auto targetNumElements =
255 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
256 auto stretchIndex =
static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
257 outDims[stretchIndex] = input.
GetNumElements() / targetNumElements;
261 reshapeInfo.
SetShape(
TensorShape{
static_cast<unsigned int>(outDims.size()), outDims.data() });
268 INetwork& m_Network,
const tensorflow::NodeDef& nodeDef)
272 const unsigned int matchDim = inputTensorInfo.
GetNumDimensions() - (isNHWC ? 1 : 3);
273 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
274 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.
GetNumDimensions(), 1);
275 reshapedDimensions[matchDim] = input1Info.
GetShape()[0];
280 const std::string reshapeLayerName =
"reshape_for-" + nodeDef.name();
293 OutputId ParseOutputId(
const std::string & name)
295 unsigned int outputNum = 0;
296 size_t colonPos = name.find_last_of(
":");
297 if (colonPos != std::string::npos)
299 int n = std::stoi(name.substr(colonPos+1));
305 "Output tensor id is out of range for %1% %2%")
309 outputNum =
static_cast<unsigned int>(n);
311 return OutputId(name.substr(0,colonPos),outputNum);
314 #define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \ 315 if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \ 317 throw ParseException( \ 320 "Unsupported data format %1% passed for %2% node %3%. " \ 321 "Only NHWC and NCHW supported %4%") \ 325 % CHECK_LOCATION().AsString())); \ 328 #define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \ 329 if(PADDING != "SAME" && PADDING != "VALID" ) \ 331 throw ParseException( \ 334 "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \ 337 % CHECK_LOCATION().AsString())); \ 342 const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
343 {
"Const", &TfParser::ParseConst },
344 {
"Add", &TfParser::ParseAdd },
345 {
"AddN", &TfParser::ParseAddN },
346 {
"BiasAdd", &TfParser::ParseBiasAdd },
347 {
"Identity", &TfParser::ParseIdentity },
348 {
"Conv2D", &TfParser::ParseConv2D },
349 {
"DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
350 {
"ExpandDims", &TfParser::ParseExpandDims },
351 {
"FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
352 {
"Gather", &TfParser::ParseGather},
353 {
"Greater", &TfParser::ParseGreater},
354 {
"ConcatV2", &TfParser::ParseConcat },
355 {
"LRN", &TfParser::ParseLrn },
356 {
"MatMul", &TfParser::ParseMatMul },
357 {
"Mean", &TfParser::ParseMean },
358 {
"Mul", &TfParser::ParseMul },
359 {
"Placeholder", &TfParser::ParsePlaceholder },
360 {
"RealDiv", &TfParser::ParseRealDiv },
361 {
"Relu", &TfParser::ParseRelu },
362 {
"Relu6", &TfParser::ParseRelu6 },
363 {
"Reshape", &TfParser::ParseReshape },
364 {
"ResizeBilinear", &TfParser::ParseResizeBilinear },
365 {
"Rsqrt", &TfParser::ParseRsqrt },
366 {
"Shape", &TfParser::ParseShape },
367 {
"Squeeze", &TfParser::ParseSqueeze },
368 {
"Sigmoid", &TfParser::ParseSigmoid },
369 {
"Softmax", &TfParser::ParseSoftmax },
370 {
"Softplus", &TfParser::ParseSoftplus },
371 {
"Split", &TfParser::ParseSplit },
372 {
"StridedSlice", &TfParser::ParseStridedSlice },
373 {
"Tanh", &TfParser::ParseTanh },
374 {
"MaxPool", &TfParser::ParseMaxPool },
375 {
"AvgPool", &TfParser::ParseAvgPool },
376 {
"Maximum", &TfParser::ParseMaximum },
377 {
"Minimum", &TfParser::ParseMinimum },
378 {
"Equal", &TfParser::ParseEqual },
379 {
"Pad", &TfParser::ParsePad },
380 {
"Sub", &TfParser::ParseSub },
381 {
"Pack" , &TfParser::ParseStack },
382 {
"Stack", &TfParser::ParseStack },
383 {
"Transpose", &TfParser::ParseTranspose },
386 const std::list<std::string> TfParser::m_ControlInputs = {
// Computes TensorFlow "SAME" padding for one spatial dimension.
// With samePadding == false (i.e. "VALID"), both paddings are zero.
// Otherwise the total padding is distributed with the extra element
// (if odd) going to the back, matching TensorFlow's convention.
void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                          uint32_t filterSize, bool samePadding,
                          uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (samePadding) {
        // Output size for SAME padding is ceil(inputSize / stride).
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize) {
            *paddingFront = (temp - inputSize) / 2;
            *paddingBack = (temp - inputSize) - *paddingFront;
        }
    }
}
421 void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
430 class ParsedTfOperation
433 ParsedTfOperation(
TfParser* parser,
const tensorflow::NodeDef& node)
439 virtual ~ParsedTfOperation() {};
441 const tensorflow::NodeDef& GetNode()
const {
return m_Node; }
445 virtual IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex) = 0;
448 virtual ParsedTfOperation* ResolveIdentityOperations()
455 const tensorflow::NodeDef& m_Node;
460 class SingleLayerParsedTfOperation :
public ParsedTfOperation
464 : ParsedTfOperation(parser, node)
469 IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 473 unsigned int armnnOutputSlotIdx = tfOutputIndex;
479 "The requested output slot #%1% " 480 "for %2% does not exist %3%")
493 class DeferredSingleLayerParsedTfOperation :
public SingleLayerParsedTfOperation
496 DeferredSingleLayerParsedTfOperation(
TfParser* parser,
const tensorflow::NodeDef& node)
497 : SingleLayerParsedTfOperation(parser, node,
nullptr)
501 IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 505 CreateLayerDeferred();
507 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
511 virtual void CreateLayerDeferred() = 0;
516 : m_Network(nullptr, nullptr)
521 const tensorflow::NodeDef* TfParser::ResolveIdentityNode(
const tensorflow::NodeDef* nodeDef)
523 if (nodeDef->op() !=
"Identity")
528 if (nodeDef->input_size() != 1)
533 "Identity node should have a single input! %1% has %2% inputs %3%")
535 % nodeDef->input_size()
539 auto it = m_NodesByName.find(nodeDef->input(0));
540 if (it != m_NodesByName.end())
542 const tensorflow::NodeDef* inputNode = it->second;
543 return ResolveIdentityNode(inputNode);
550 "Cannot find what the Identity node %1% is linked to! %2%")
556 std::vector<OutputOfConstNodeDef>
557 TfParser::GetTfInputNodes(
const tensorflow::NodeDef& nodeDef)
const 559 std::vector<OutputOfConstNodeDef> ret;
561 if (nodeDef.op() ==
"Const")
567 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
568 for (
int j = 0; j < nodeDef.input_size(); ++j)
570 OutputId outputId = ParseOutputId(nodeDef.input(j));
572 if (nodeDef.input(j)[0] ==
'^')
579 if (inputIt == m_NodesByName.end())
584 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
595 std::vector<OutputOfParsedTfOperation>
596 TfParser::GetInputParsedTfOperationsChecked(
const tensorflow::NodeDef& nodeDef,
597 std::size_t expectedNumInputs)
600 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
601 const std::size_t numInputs = nodes.size();
602 if (numInputs != expectedNumInputs)
607 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
614 std::vector<OutputOfParsedTfOperation> result;
615 for (
auto&& node : nodes)
617 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
618 if (it == m_ParsedTfOperations.end())
623 "Node with name '%1%' has not been parsed %2%")
624 % node.m_IndexedValue->name()
627 ParsedTfOperation* parsedOp = it->second.get();
629 parsedOp = parsedOp->ResolveIdentityOperations();
636 const tensorflow::NodeDef& nodeDef,
639 const std::string& layerName)
646 if (input0Dim != input1Dim)
650 if (input0Dim == 1 && input1Dim == 4)
652 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot,
true, *m_Network, nodeDef);
654 else if (input0Dim == 4 && input1Dim == 1)
656 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot,
true, *m_Network, nodeDef);
662 boost::format(
"Unsupported broadcast configuration for %1% operation %2% %3%")
675 std::vector<unsigned int> outputShape;
682 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
692 const tensorflow::NodeDef& nodeDef,
695 unsigned int numberOfAddition,
696 unsigned long numberOfLayersToConnect,
701 std::string layerName(nodeDef.name());
702 if (isOdd || numberOfLayersToConnect != 2)
705 layerName.append(
"_addN_").append(std::to_string(numberOfAddition));
707 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
711 const tensorflow::NodeDef& nodeDef,
714 unsigned int numberOfAddition)
718 std::string layerName(nodeDef.name());
719 layerName.append(
"_addN_").append(std::to_string(numberOfAddition));
720 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
724 const tensorflow::NodeDef& nodeDef,
730 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
733 ParsedTfOperationPtr TfParser::ParseAddN(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
736 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef,
"N");
737 if (numberOfInputs < 2)
743 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
745 % std::to_string(numberOfInputs)
748 else if (numberOfInputs == 2)
751 return AddAdditionLayer(nodeDef,
false);
758 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
759 unsigned int numberOfAdditions = 0;
760 std::vector<IConnectableLayer*> layers;
762 for (
unsigned int i = 0; i < numberOfInputs; ++i)
765 bool onSecondItem = i % 2;
770 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
771 layers.push_back(newLayer);
775 std::vector<IConnectableLayer*> layersToConnect(layers);
776 unsigned long numberOfLayersToConnect = layersToConnect.size();
777 bool isOdd = numberOfInputs % 2;
779 while (numberOfLayersToConnect > 1)
782 for (
unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
783 bool onSecondItem = i % 2;
788 layersToConnect[i - 1],
791 numberOfLayersToConnect,
793 layers.push_back(newLayer);
797 layersToConnect = layers;
798 numberOfLayersToConnect = layersToConnect.size();
807 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
809 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, finalLayer);
813 ParsedTfOperationPtr TfParser::ParseAdd(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
816 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
820 if (inputs[0].m_IndexedValue->GetNode().op() ==
"MatMul" &&
821 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
824 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
825 &nodeDef,nodeDef.name().c_str());
826 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
828 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
829 inputs[1].m_IndexedValue->GetNode().op() ==
"MatMul")
832 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
833 &nodeDef,nodeDef.name().c_str());
834 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
839 return AddAdditionLayer(nodeDef);
843 ParsedTfOperationPtr TfParser::ParseBiasAdd(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
846 return AddAdditionLayer(nodeDef,
true);
850 class ParsedIdentityTfOperation :
public ParsedTfOperation
853 ParsedIdentityTfOperation(
TfParser* parser,
const tensorflow::NodeDef& node, ParsedTfOperation* representative)
854 : ParsedTfOperation(parser, node)
855 , m_Representative(representative)
859 virtual IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 861 BOOST_ASSERT(m_Representative);
862 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
865 virtual ParsedTfOperation* ResolveIdentityOperations()
override 867 return m_Representative->ResolveIdentityOperations();
871 ParsedTfOperation* m_Representative;
874 ParsedTfOperationPtr TfParser::ParseIdentity(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
877 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
879 return std::make_unique<ParsedIdentityTfOperation>(
this, nodeDef, inputs[0].m_IndexedValue);
885 template <
typename T>
890 const T* tensorData,
const TensorInfo& tensorInfo)
891 : DeferredSingleLayerParsedTfOperation(parser, node),
893 m_TensorInfo(tensorInfo)
898 void CreateLayerDeferred()
override 900 BOOST_ASSERT(
m_Layer ==
nullptr);
901 m_Layer = m_Parser->m_Network->AddConstantLayer(
ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
905 ConstTensor GetConstTensor(std::vector<T>& outputTensorData)
const 909 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.
GetNumBytes());
912 ConstTensor constTensor(m_TensorInfo, outputTensorData);
916 const T* GetStorage()
const 918 return m_Storage.data();
928 std::vector<T> m_Storage;
934 const tensorflow::NodeDef& nodeDef)
938 case tensorflow::DT_FLOAT:
939 return DataType::Float32;
941 case tensorflow::DT_INT32:
942 return DataType::Signed32;
948 "Unknown DataType %1% for node %2% %3%")
949 % tensorflow::DataType_Name(tfDataType)
955 struct ParseTfTensorValueList
957 template<
typename DataType>
959 const tensorflow::TensorProto& tfTensor,
960 unsigned int dstElements,
961 std::vector<int8_t>& outputData);
963 template <
typename DataType>
964 static void ReadData(
const void* srcData,
unsigned int numSrcElements,
965 std::vector<int8_t>& dstData,
unsigned int numDstElements)
968 if (numSrcElements == 0)
974 if (numDstElements == 0)
976 numDstElements = numSrcElements;
980 dstData.resize(std::max(numSrcElements, numDstElements) *
sizeof(
DataType));
986 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
988 if (numDstElements > numSrcElements)
991 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
998 void ParseTfTensorValueList::Parse<float>(
const tensorflow::TensorProto& tfTensor,
999 unsigned int dstElements, std::vector<int8_t>& outputData)
1001 ReadData<float>(tfTensor.float_val().data(),
static_cast<unsigned int>(tfTensor.float_val_size()),
1002 outputData, dstElements);
1006 void ParseTfTensorValueList::Parse<int32_t>(
const tensorflow::TensorProto& tfTensor,
1007 unsigned int dstElements, std::vector<int8_t>& outputData)
1009 ReadData<int32_t>(tfTensor.int_val().data(),
static_cast<unsigned int>(tfTensor.int_val_size()),
1010 outputData, dstElements);
1013 template <
template<
typename>
class OperatorType,
typename T = int8_t>
1014 struct MakeTfOperation
1016 template<
typename DataType,
class... Args>
1017 inline static std::unique_ptr<OperatorType<DataType>> Parse(
TfParser* parser,
const tensorflow::NodeDef& node,
1020 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
1025 struct MakeTfOperation<ParsedConstTfOperation>
1027 template<
typename DataType,
class... Args>
1028 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(
TfParser* parser,
1029 const tensorflow::NodeDef& node,
const std::vector<int8_t>& tensorData,
const TensorInfo& tensorInfo)
1031 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1032 reinterpret_cast<const DataType*
>(tensorData.data()), tensorInfo);
1036 template <
class FuncType>
1037 struct InvokeParseFunction
1039 template<
class ResType,
class... Args>
1040 inline static ResType Result(
DataType dataType, Args&&... args)
1042 if (dataType == DataType::Float32)
1044 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1046 else if (dataType == DataType::Signed32)
1048 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1054 template<
class... Args>
1055 inline static void Result(
DataType dataType, Args&&... args)
1057 if (dataType == DataType::Float32)
1059 FuncType::template Parse<float>(std::forward<Args>(args)...);
1061 else if (dataType == DataType::Signed32)
1063 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1068 ParsedTfOperationPtr TfParser::ParseConst(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1071 BOOST_ASSERT(nodeDef.op() ==
"Const");
1073 if (nodeDef.attr().count(
"value") == 0)
1078 "Value not found for Const node - %1% %2%")
1083 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at(
"value").tensor();
1084 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1087 const auto GetDimensionSize = [](
auto& d) {
return d.size(); };
1089 std::vector<unsigned int> dimensionSizes;
1090 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1091 std::back_inserter(dimensionSizes), GetDimensionSize);
1095 unsigned int numElements = 0U;
1097 if (!dimensionSizes.empty())
1099 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1100 1U, std::multiplies<unsigned int>());
1103 std::vector<int8_t> tensorData;
1106 if (tfTensor.tensor_content().empty())
1108 InvokeParseFunction<ParseTfTensorValueList>::Result<
void>(dataType, tfTensor, numElements, tensorData);
1112 if (numElements == 0)
1114 const unsigned int tfNumElements =
1115 static_cast<unsigned int>(tensorData.size()) /
GetDataTypeSize(dataType);
1116 dimensionSizes.push_back(tfNumElements);
1122 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1125 if (numElements == 0)
1130 "No tensor shape found for Const node - %1% %2%")
1137 if (tensorData.empty())
1142 "No tensor data found for Const node - %1% %2%")
1147 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1148 dimensionSizes.data(),
1153 if (tensorData.size() > tensorInfo.GetNumBytes())
1158 "Number of elements (%1%) should be less than or equal " 1159 "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
1161 % tensorInfo.GetNumElements()
1166 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1167 dataType,
this, nodeDef, tensorData, tensorInfo);
1170 template<
typename Type>
1171 bool TfParser::HasParsedConstTensor(
const std::string & nodeName)
const 1173 auto it = m_ParsedTfOperations.find(nodeName);
1174 if (it == m_ParsedTfOperations.end())
1178 return dynamic_cast<ParsedConstTfOperation<Type>*
>(it->second.get()) !=
nullptr;
1181 template<
typename Type>
1182 bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr)
const 1184 return dynamic_cast<ParsedConstTfOperation<Type>*
>(parsedTfOpPtr) !=
nullptr;
1187 unsigned int TfParser::GetConstInputIndex(
const std::vector<OutputOfParsedTfOperation>& inputs)
1189 for (
unsigned int i = 0; i < inputs.size(); i++)
1191 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1199 "ArmNN only supports operators with constant axis. %1%")
1205 const tensorflow::GraphDef& graphDef)
1208 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1209 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1212 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1217 "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
1219 % inputs[1].m_IndexedValue->GetNode().name()
1222 ParsedConstTfOperation<float>* weightNode =
1223 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1225 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
1226 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
1227 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
1230 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef,
"dilations");
1231 if (!dilations.empty())
1233 for (
auto dilation : dilations)
1240 "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
1252 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1269 dataLayout == DataLayout::NHWC ?
1270 std::initializer_list<unsigned int>{ 1, 2, 3, 0 } :
1271 std::initializer_list<unsigned int>{ 2, 3, 1, 0 };
1274 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1278 std::vector<float> weightTensorSwizzledData(weightTensorInfo.
GetNumElements());
1280 weightNode->GetStorage(), weightTensorSwizzledData.data(),
sizeof(float));
1283 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1288 bool padding =
false;
1290 unsigned int outputHeight = 0;
1291 unsigned int outputWidth = 0;
1295 if (paddingString ==
"SAME")
1299 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
1301 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
1304 else if (paddingString ==
"VALID")
1308 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1310 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1316 case DataLayout::NHWC:
1323 case DataLayout::NCHW:
1339 nodeDef.name().c_str());
1343 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1347 const tensorflow::GraphDef& graphDef)
1350 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1351 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1354 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1359 "ArmNN only supports Depthwise Convolution layer with constant weights. " 1360 "Non const input found %1% for node %2% %3%")
1361 % inputs[1].m_IndexedValue->GetNode().name()
1366 ParsedConstTfOperation<float>* weightNode =
1367 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1369 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
1370 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
1371 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
1378 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1396 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1400 std::vector<float> weightTensorSwizzledData(weightTensorInfo.
GetNumElements());
1402 weightNode->GetStorage(), weightTensorSwizzledData.data(),
sizeof(float));
1405 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1407 uint32_t weightHeight = weightTensor.
GetShape()[2];
1408 uint32_t weightWidth = weightTensor.
GetShape()[3];
1410 bool padding =
false;
1412 unsigned int outputHeight = 0;
1413 unsigned int outputWidth = 0;
1417 if (paddingString ==
"SAME")
1421 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
1423 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
1426 else if (paddingString ==
"VALID")
1430 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1432 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1438 case DataLayout::NHWC:
1445 case DataLayout::NCHW:
1461 nodeDef.name().c_str());
1465 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1470 BOOST_ASSERT(nodeDef.op() ==
"ExpandDims");
1476 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1482 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef,
"Tdim");
1485 std::vector<uint32_t> outputDims;
1488 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1492 auto currentDimension = inputTensorInfo.
GetShape()[i];
1493 outputDims.push_back(currentDimension);
1499 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1500 outputDims.insert(getPosition, 1);
1508 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1509 outputDims.insert(getPosition, 1);
1517 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1523 if (outputDims.size() > 4)
1528 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1540 return outTensorInfo;
1543 ParsedTfOperationPtr TfParser::ParseExpandDims(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1546 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1548 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1560 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1564 const tensorflow::GraphDef& graphDef)
1567 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1569 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1574 "ArmNN only supports FusedBatchNormalization layers with constant scale. " 1575 "Input %1%. Node %2% %3%")
1576 % inputs[1].m_IndexedValue->GetNode().name()
1580 ParsedConstTfOperation<float>* scaleNode =
1581 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1583 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1588 "ArmNN only supports FusedBatchNormalization layers with constant offset. " 1589 "Input %1%. Node %2% %3%")
1590 % inputs[2].m_IndexedValue->GetNode().name()
1594 ParsedConstTfOperation<float>* offsetNode =
1595 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1597 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1602 "ArmNN only supports FusedBatchNormalization layers with constant mean. " 1603 "Input %1%. Node %2% %3%")
1604 % inputs[3].m_IndexedValue->GetNode().name()
1608 ParsedConstTfOperation<float>* meanNode =
1609 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1611 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1616 "ArmNN only supports FusedBatchNormalization layers with constant variance. " 1617 "Input %1%. Node %2% %3%")
1618 % inputs[4].m_IndexedValue->GetNode().name()
1622 ParsedConstTfOperation<float>* varianceNode =
1623 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1625 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef,
"data_format",
"NHWC");
1630 desc.
m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef,
"epsilon");
1631 desc.
m_DataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1635 std::vector<float> scaleTensorData;
1636 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
1638 std::vector<float> offsetTensorData;
1639 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
1641 std::vector<float> meanTensorData;
1642 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
1644 std::vector<float> varianceTensorData;
1645 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
1652 nodeDef.name().c_str());
1654 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1659 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — original file line numbers are
// fused into the text and interior lines are elided. Code kept byte-identical;
// only comments added.
// Pattern-matcher for LeakyReLu expressed in TF as max(x, alpha * x):
// mulNodeDef must be a "Mul" op whose non-alpha input names otherNodeDef and
// whose alpha input is a parsed constant float tensor with exactly one
// element; on a match the activation descriptor is filled with
// ActivationFunction::LeakyReLu and m_A = alpha.
1662 bool TfParser::IsSupportedLeakyReluPattern(
const tensorflow::NodeDef& mulNodeDef,
1663 size_t alphaLayerIndex,
// presumably otherOp is an OutputOfParsedTfOperation parameter (declaration
// elided) — TODO confirm against the full source
1668 const tensorflow::NodeDef& otherNodeDef = otherOp.
m_IndexedValue->GetNode();
1677 if (mulNodeDef.op() ==
"Mul")
// The Mul has exactly two inputs; whichever is not the alpha constant must
// be the tensor the Maximum compares against (otherNodeDef).
1679 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1680 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1682 BOOST_ASSERT(inputs.size() == 2);
1683 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1684 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1685 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1687 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1689 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1691 ParsedConstTfOperation<float>* alpha =
1692 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1693 inputs[alphaLayerIndex].m_IndexedValue);
1695 std::vector<float> const_data;
1696 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
// alpha must be a scalar (single element) to map onto LeakyReLu's m_A.
1698 if (const_data.size() == 1)
1700 desc.
m_Function = ActivationFunction::LeakyReLu;
1701 desc.
m_A = const_data[0];
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// ParseMaximum (signature head elided): tries all four placements of the
// LeakyReLu pattern across the two Maximum inputs; on a match emits an
// activation layer, otherwise falls back to a plain Maximum layer.
1713 const tensorflow::GraphDef& graphDef)
1716 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1717 if (inputs.size() != 2)
1722 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1728 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1729 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
// Either input may be the Mul and the alpha may sit at either Mul input,
// hence the four pattern probes.
1742 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1743 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1744 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1745 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1747 BOOST_ASSERT(outputOfLeakyRelu !=
nullptr);
1752 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1758 return AddMaximumLayer(nodeDef);
// Resolves the two input slots of an elementwise op, inserting a broadcast
// reshape when exactly one side is rank-1 and the other rank-4; any other
// rank mismatch is rejected (exception text visible below).
1762 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1763 const tensorflow::NodeDef& nodeDef,
const std::string& layerName)
1765 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1767 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1768 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1772 if (input0Dim != input1Dim)
1776 if (input0Dim == 1 && input1Dim == 4)
1778 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot,
true, *m_Network, nodeDef);
1780 else if (input0Dim == 4 && input1Dim == 1)
1782 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot,
true, *m_Network, nodeDef);
1788 boost::format(
"Unsupported broadcast configuration for %1% operation %2% %3%")
1794 return {input0Slot, input1Slot};
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Two layer-finalizing helpers (signature heads elided — presumably
// ProcessComparisonLayer / ProcessElementwiseLayer): each computes the
// broadcast output shape as the per-dimension max of the two input shapes
// and wraps the layer in a SingleLayerParsedTfOperation.
1801 const tensorflow::NodeDef& nodeDef)
1808 std::vector<unsigned int> outputShape;
1815 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1821 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1828 const tensorflow::NodeDef& nodeDef)
1834 std::vector<unsigned int> outputShape;
1841 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1847 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Gather parser: output rank = paramsDim - 1 + indicesDim; indices dims come
// first, then the trailing params dims (axis-0 gather).
1851 const tensorflow::GraphDef& graphDef)
1854 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1855 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1856 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1861 unsigned int outputDim = paramsDim - 1 + indicesDim;
1863 std::vector<unsigned int> dimSizes;
1865 for (
unsigned int i = 0; i < indicesDim; ++i)
1869 for (
unsigned int i = 1; i < paramsDim; ++i)
1881 params.
Connect(layer->GetInputSlot(0));
1882 indices.
Connect(layer->GetInputSlot(1));
1884 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Greater / Equal / Minimum parsers: all funnel through
// ProcessElementwiseInputSlots then the comparison/elementwise finalizer.
1888 const tensorflow::GraphDef& graphDef)
1891 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef,
"Greater");
1898 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
1902 const tensorflow::GraphDef& graphDef)
1905 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef,
"Equal");
1912 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
1916 const tensorflow::GraphDef& graphDef)
1919 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef,
"Minimum");
1925 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Sub parser: broadcasts the lower-rank input (NHWC assumed) before adding
// the subtraction layer.
1928 ParsedTfOperationPtr TfParser::ParseSub(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1931 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1933 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1934 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1941 const bool isNHWC =
true;
1942 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1947 const bool isNHWC =
true;
1948 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1965 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Pack/Stack parser: validates the "axis" attribute against the input rank
// (axis may be negative; valid range is [-(rank+1), rank+1)), normalizes it,
// fills the stack descriptor and inserts the new dimension of size numInputs.
1968 ParsedTfOperationPtr TfParser::ParseStack(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1971 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1973 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
1979 "Pack/Stack expects at least one input. Got %1% for Node %2% %3%")
1985 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1987 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1992 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef,
"axis");
// Stacking adds one dimension, so the valid axis range is rank + 1 wide.
1993 const int sNumDimensions = (
static_cast<int>(numDimensions) + 1);
1994 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
1999 "Axis index is not in range. Got %1% for Node %2% %3%")
// Normalize a negative axis to its positive equivalent.
2007 axis =
static_cast<int32_t
>(numDimensions) + axis + 1;
2011 stackDescriptor.
m_Axis =
static_cast<uint32_t
>(axis);
2012 stackDescriptor.
m_NumInputs =
static_cast<uint32_t
>(numInputs);
2015 const unsigned int supportedNumDims = 4;
2016 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2018 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2027 "The number of dimensions: %1% for input tensors of the " 2028 "Pack/Stack op. Number of dimensions should be less than %2% %3%")
2035 std::vector<unsigned int> outputDimensions;
2038 outputDimensions.push_back(stackDescriptor.
m_InputShape[i]);
// The stacked dimension (size numInputs) is inserted at the chosen axis.
2040 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
2045 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2047 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2053 outputDimensions.data(),
2056 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Transpose parser: the permutation vector comes from whichever input parsed
// as an int32 constant; it is widened to unsigned and handed to ArmNN as a
// PermutationVector.
2059 ParsedTfOperationPtr TfParser::ParseTranspose(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2063 auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2064 const auto inputCount = inputs.size();
2066 if (inputCount != 2)
2071 "The number of given input is %1%. It should be two for Transpose op." 2078 auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2080 const auto constInput = inputs[GetConstInputIndex(inputs)];
2081 auto* permuteVectorInput =
2082 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
2083 const auto& permuteVectorInfo = permuteVectorInput->
GetTensorInfo();
2085 std::vector<int32_t> permuteVectorData;
2086 permuteVectorInput->GetConstTensor(permuteVectorData);
2088 std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
2090 const auto permutationVector =
PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
2094 BOOST_ASSERT(layer);
2103 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Validates a Pad paddings tensor: rank must match the padded input's rank
// and the second dimension must be 2 (before/after per dimension); returns
// the rank (usage visible in ParsePad below).
2108 const std::string& nodeName)
2110 unsigned int rank = paddingTensor.
GetShape()[0];
2112 if (rank != expectedRank)
2117 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
2123 unsigned int second = paddingTensor.
GetShape()[1];
2129 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
// Builds the padded output TensorInfo: each dimension grows by its
// before+after padding amounts; other TensorInfo fields are copied.
2139 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2142 std::vector<unsigned int> outDims;
2143 for (
unsigned int i = 0; i < numDims; ++i)
2145 unsigned int dimSize = inputTensorInfo.
GetShape()[i];
2146 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2147 dimSize += dimPadding.first;
2148 dimSize += dimPadding.second;
2149 outDims.push_back(dimSize);
2151 TensorInfo paddedTensorInfo = inputTensorInfo;
2152 unsigned int outDimsSize =
static_cast<unsigned int>(outDims.size());
2154 return paddedTensorInfo;
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Pad parser (signature head elided): requires the paddings input to be an
// int32 constant, rejects negative padding amounts, and builds a per-dim
// (before, after) pad list.
2158 const tensorflow::GraphDef& graphDef)
2164 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2165 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2167 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2172 "ArmNN only supports Pad with constant padding. " 2173 "Input %1%. Node %2% %3%")
2174 % inputs[1].m_IndexedValue->GetNode().name()
2179 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2180 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2182 std::vector<int32_t> paddingTensorData;
2183 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
2190 std::vector<std::pair<unsigned int, unsigned int>> padList;
2191 unsigned int rank =
CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
// paddings is laid out row-major as rank x 2: index = i*2 + j where j==0 is
// the "before" amount and j==1 the "after" amount.
2192 for (
unsigned int i = 0; i < rank; ++i)
2194 std::pair<unsigned int, unsigned int> paddingForDim;
2195 for (
unsigned int j = 0; j < 2; j++)
2197 unsigned int index = (i * 2) + j;
2198 int paddingAmount = paddingTensorData[index];
2200 if (paddingAmount < 0)
2205 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2214 paddingForDim.first =
static_cast<unsigned int>(paddingAmount);
2218 paddingForDim.second =
static_cast<unsigned int>(paddingAmount);
2221 padList.push_back(paddingForDim);
2229 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// ConcatV2 parser: the concat axis comes from the constant input; axes 0 and
// 2 are rejected (exception text visible below). The origins descriptor is
// filled view by view and the merged dimension accumulated.
2233 const tensorflow::GraphDef& graphDef)
2236 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2239 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2241 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2244 unsigned int index = GetConstInputIndex(inputs);
2246 ParsedConstTfOperation<int32_t>* shapeNode =
2247 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2249 std::vector<int32_t> axisTensorData;
2250 shapeNode->GetConstTensor(axisTensorData);
2253 const unsigned int concatDim =
static_cast<unsigned int>(axisTensorData[0]);
2256 if (concatDim == 0 || concatDim == 2)
2261 "Dimension %1% for concatenation is not supported by Armnn. " 2268 const unsigned int supportedNumDims = 4;
// The last input is the axis constant, so only numInputs - 1 views concat.
2269 unsigned int numConcatViews = numInputs - 1;
2270 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
2273 unsigned int mergeDim = 0;
2274 for (
unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2277 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2286 "The number of dimensions: %1% for input tensors of the " 2287 "concatenation op should be %2% %3%")
2294 mergeDims = inputTensorInfo.
GetShape();
// const_cast is needed because OriginsDescriptor only exposes a const view
// origin pointer — the descriptor's own storage is being initialized here.
2295 unsigned int* viewOrigin =
const_cast<unsigned int*
>(concatDescriptor.
GetViewOrigin(viewIndex));
2296 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
2300 mergeDim += mergeDims[concatDim];
2304 mergeDims[concatDim] = mergeDim;
2309 for (
unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2311 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2315 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Shape parser (signature head elided): materializes the input's shape as an
// int32 constant operation rather than a runtime layer; only DT_INT32
// out_type is supported.
2319 const tensorflow::GraphDef& graphDef)
2329 if (tfDataType != tensorflow::DT_INT32)
2334 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2335 % tensorflow::DataType_Name(tfDataType)
2340 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2341 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2345 std::vector<int32_t> shapeTensorData;
2346 shapeTensorData.reserve(prevLayerDimensions);
2348 for (
unsigned int i=0; i<prevLayerDimensions; ++i)
2350 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.
GetShape()[i]));
// The resulting constant is a rank-1 tensor holding the input's dimensions.
2353 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2355 return std::make_unique<ParsedConstTfOperation<int32_t>>(
this,
2357 &shapeTensorData[0],
// Reshape parser: target shape must be an int32 constant; PrepareReshape
// derives the output TensorInfo from it.
2362 const tensorflow::GraphDef& graphDef)
2365 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2366 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2368 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2373 "ArmNN only supports Reshape layers with constant shapes. " 2374 "Input %1% Node %2% %3%")
2375 % inputs[1].m_IndexedValue->GetNode().name()
2379 ParsedConstTfOperation<int32_t>* shapeNode =
2380 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2382 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2383 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2385 std::vector<int32_t> shapeTensorData;
2386 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
2387 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2397 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// ResizeBilinear parser: sizes must be a constant; align_corners=true is
// rejected. Output shape keeps batch and channels from the (NHWC-indexed)
// input and takes height/width from the size tensor.
2401 const tensorflow::GraphDef& graphDef)
2404 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2406 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2411 "ArmNN only supports ResizeBilinear layers with constant sizes. " 2412 "Input %1%. Node %2% %3%")
2413 % inputs[1].m_IndexedValue->GetNode().name()
2417 ParsedConstTfOperation<int32_t>* sizeNode =
2418 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue)
2421 if (ReadOptionalNodeBoolAttribute(nodeDef,
"align_corners",
false))
2426 "ArmNN only supports ResizeBilinear layers with align_corners set to false. " 2433 std::vector<int32_t> sizeTensorData;
2434 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
// sizeTensorData[0] is the target height (assignment elided), [1] the width.
2440 desc.
m_TargetWidth =
static_cast<uint32_t
> (sizeTensorData[1]);
2445 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2449 unsigned int outBatch = inputTensorInfo.
GetShape()[0];
2450 unsigned int outChannels = inputTensorInfo.
GetShape()[3];
2453 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
2460 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Squeeze output-shape helper (signature head elided): maps DT_FLOAT /
// DT_INT32 to the ArmNN data type, drops size-1 dimensions listed in
// "squeeze_dims" (or all size-1 dims when the attribute is empty), and
// rejects outputs of more than 4 dimensions.
2465 BOOST_ASSERT(nodeDef.op() ==
"Squeeze");
2469 if (tfDataType == tensorflow::DT_FLOAT)
2471 type = DataType::Float32;
2473 else if (tfDataType == tensorflow::DT_INT32)
2475 type = DataType::Signed32;
2481 boost::format(
"Unsupported DataType %1% for Squeeze operation %2% %3%")
2482 % tensorflow::DataType_Name(tfDataType)
2493 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2499 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef,
"squeeze_dims");
2500 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
// An empty squeeze_dims means "squeeze every size-1 dimension".
2502 if (squeezeDims.empty())
2504 squeezeDims.assign(dimensionSequence,
2508 std::vector<uint32_t> outputDims;
// A dimension survives if it is not listed, or if its size is not 1.
2511 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2512 auto currentDimension = inputTensorInfo.
GetShape()[i];
2513 if (skipSqueeze || currentDimension != 1)
2515 outputDims.push_back(currentDimension);
2519 if (outputDims.size() > 4)
2524 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2535 outTensorInfo.SetDataType(type);
2537 return outTensorInfo;
// Squeeze parser: reshapes the single input to the squeezed shape.
2540 ParsedTfOperationPtr TfParser::ParseSqueeze(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2543 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2545 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2557 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// LRN parser: maps the TF attributes alpha/beta/bias/depth_radius onto an
// ArmNN across-channel LocalBrightness normalization descriptor.
2560 ParsedTfOperationPtr TfParser::ParseLrn(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2563 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2566 normalizationDescriptor.
m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2567 normalizationDescriptor.
m_NormChannelType = NormalizationAlgorithmChannel::Across;
2568 normalizationDescriptor.
m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef,
"alpha");
2569 normalizationDescriptor.
m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef,
"beta");
2570 normalizationDescriptor.
m_K = ReadMandatoryNodeFloatAttribute(nodeDef,
"bias");
2571 normalizationDescriptor.
m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef,
"depth_radius");
2577 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2579 nodeDef.name().c_str());
2583 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Deferred MatMul operation (class head elided): layer creation is postponed
// until the output slot is needed, so a following Add can be fused into a
// fully-connected layer with bias (see AddFullyConnectedLayer).
2595 : DeferredSingleLayerParsedTfOperation(parser, node)
2599 void CreateLayerDeferred()
override 2601 BOOST_ASSERT(
m_Layer ==
nullptr);
2602 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node,
nullptr, m_Node.name().c_str());
// MatMul parser: just wraps the node into the deferred operation above.
2606 ParsedTfOperationPtr TfParser::ParseMatMul(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2611 return std::make_unique<ParsedMatMulTfOperation>(
this, nodeDef);
// Mean parser: the axis input must be a constant; raw (possibly negative)
// axes are normalized into [0, rank) and de-duplicated via a set.
2614 ParsedTfOperationPtr TfParser::ParseMean(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2617 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2618 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2621 if (inputs.size() != 2)
2624 boost::str(boost::format(
"Mean expects two inputs!. Got %1% for Node %2% %3%")
2630 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef,
"keep_dims");
2632 ParsedConstTfOperation<int32_t>* axisNode =
2633 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2635 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2637 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2638 const int* axisData =
static_cast<const int*
>(axisTensor.GetMemoryArea());
2647 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2648 std::set<unsigned int> positiveAxisSet;
// (i + rank) % rank maps negative axes to their positive equivalents.
2651 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2652 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2653 [rank](
int i) ->
unsigned int {
return static_cast<unsigned int>((i + rank) % rank); });
2659 meanDescriptor.
m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2666 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Deferred Mul operation (class head elided): mirrors the MatMul deferral so
// Mul can participate in later fusion decisions.
2678 : DeferredSingleLayerParsedTfOperation(parser, node)
2682 void CreateLayerDeferred()
override 2684 BOOST_ASSERT(
m_Layer ==
nullptr);
2685 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
// Mul parser: wraps the node into the deferred operation above.
2689 ParsedTfOperationPtr TfParser::ParseMul(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2693 return std::make_unique<ParsedMulTfOperation>(
this, nodeDef);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Placeholder parser (signature head elided): the caller must have supplied
// the input's shape via m_InputShapes; the shape becomes a Float32 network
// input and is registered as an input binding.
2697 const tensorflow::GraphDef& graphDef)
2701 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2705 auto it = m_InputShapes.find(nodeDef.name());
2706 if (it == m_InputShapes.end())
2711 "Missing input shape for Placeholder '%1%' %2%")
2715 TensorInfo tensorInfo(it->second, DataType::Float32);
2721 TrackInputBinding(layer, layerId, tensorInfo);
2723 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// RealDiv parser: delegates to the shared division-layer helper.
2726 ParsedTfOperationPtr TfParser::ParseRealDiv(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2729 return AddRealDivLayer(nodeDef);
// Relu parser (signature head elided): plain ReLu activation.
2733 const tensorflow::GraphDef& graphDef)
2738 activationDesc.
m_Function = ActivationFunction::ReLu;
2739 return AddActivationLayer(nodeDef, activationDesc);
// Relu6 parser (signature head elided): BoundedReLu clamped to [0, 6].
2743 const tensorflow::GraphDef& graphDef)
2748 activationDesc.
m_Function = ActivationFunction::BoundedReLu;
2749 activationDesc.
m_A = 6.0f;
2750 activationDesc.
m_B = 0.0f;
2752 return AddActivationLayer(nodeDef, activationDesc);
// Sigmoid parser (signature head elided).
2756 const tensorflow::GraphDef& graphDef)
2761 activationDesc.
m_Function = ActivationFunction::Sigmoid;
2763 return AddActivationLayer(nodeDef, activationDesc);
// Single-input layer parser (signature head elided — presumably Rsqrt):
// wires the one input straight into the created layer.
2767 const tensorflow::GraphDef &graphDef)
2771 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2776 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2780 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Another single-input layer parser (signature head elided — presumably
// Softmax); same wiring pattern as above.
2784 const tensorflow::GraphDef& graphDef)
2788 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2793 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2797 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Split parser (signature head elided): the split axis comes from the
// constant input; axes 0 and 2 are rejected, the input must be rank-4, and
// the split dimension must divide evenly by "num_split".
2801 const tensorflow::GraphDef& graphDef)
2805 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2806 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2807 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2810 unsigned int index = GetConstInputIndex(inputs);
2812 ParsedConstTfOperation<int32_t>* shapeNode =
2813 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2815 std::vector<int32_t> axisTensorData;
2816 shapeNode->GetConstTensor(axisTensorData);
2819 const unsigned int splitDim =
static_cast<unsigned int>(axisTensorData[0]);
2822 if (splitDim == 0 || splitDim == 2)
2827 "Dimension %1% for split is not supported by Armnn. " 2835 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef,
"num_split");
// Of the two inputs, "1 - index" is the data tensor (the other is the axis).
2837 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
2840 const unsigned int supportedNumDims = 4;
2843 if (inputDimSize != supportedNumDims)
2848 "The number of dimensions: %1% for input tensors of the " 2849 "split op should be %2% %3%")
2855 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2858 for (
unsigned int i = 0; i < inputDimSize; ++i)
2860 splitterDimSizes[i] = inputTensorInfo.
GetShape()[i];
2863 if (splitterDimSizes[splitDim] % num_split != 0)
2865 throw ParseException(
"Number of splits must evenly divide the dimension");
2867 splitterDimSizes[splitDim] /= num_split;
// Each of the num_split views gets the same (already divided) sizes.
2870 for (
unsigned int g = 0; g < num_split; ++g)
2873 for (
unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2875 splitDesc.
SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2885 splitterDimSizes.data());
2892 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Softplus parser (signature head elided): maps to SoftReLu.
2896 const tensorflow::GraphDef& graphDef)
2901 activationDesc.
m_Function = ActivationFunction::SoftReLu;
2903 return AddActivationLayer(nodeDef, activationDesc);
// StridedSlice parser (signature head elided): begin/end/strides must all be
// int32 constants (inputs 1..3); the five TF mask attributes are copied into
// the descriptor verbatim.
2907 const tensorflow::GraphDef& graphDef)
2911 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2912 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2913 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2915 ParsedConstTfOperation<int32_t>* beginNode =
2916 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
2917 std::vector<int32_t> beginTensorData;
2918 beginNode->GetConstTensor(beginTensorData);
2920 ParsedConstTfOperation<int32_t>* endNode =
2921 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
2922 std::vector<int32_t> endTensorData;
2923 endNode->GetConstTensor(endTensorData);
2925 ParsedConstTfOperation<int32_t>* stridesNode =
2926 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
2927 std::vector<int32_t> stridesTensorData;
2928 stridesNode->GetConstTensor(stridesTensorData);
2931 desc.
m_Begin = beginTensorData;
2932 desc.
m_End = endTensorData;
2934 desc.
m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"begin_mask");
2935 desc.
m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"end_mask");
2936 desc.
m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"ellipsis_mask");
2937 desc.
m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"new_axis_mask");
2938 desc.
m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"shrink_axis_mask");
2942 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2951 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Tanh parser: TanH with a = b = 1.
2954 ParsedTfOperationPtr TfParser::ParseTanh(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2959 activationDesc.
m_Function = ActivationFunction::TanH;
2960 activationDesc.
m_A = 1.0f;
2961 activationDesc.
m_B = 1.0f;
2963 return AddActivationLayer(nodeDef, activationDesc);
// Shared activation-layer helper (signature head elided): wires the single
// input into the created activation layer.
2969 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2973 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2976 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// MaxPool parser (signature head elided): delegates to the shared pooling
// parser with the Max algorithm.
2980 const tensorflow::GraphDef& graphDef)
2982 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
// AvgPool parser (signature head elided): same, with Average.
2986 const tensorflow::GraphDef& graphDef)
2988 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
// Shared 2D pooling parser (signature head elided): reads padding /
// data_format / strides / ksize attributes, computes the output height and
// width using TF's SAME (ceil(in/stride)) and VALID
// (ceil((in - pool + 1)/stride)) formulas, then builds the output shape per
// data layout.
2996 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2997 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3000 if (inputs.size() != 1)
3005 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
3011 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
3012 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
3013 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
3014 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"ksize");
// Anything other than "NHWC" is treated as NCHW.
3022 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
3034 bool padding =
false;
3036 unsigned int outputHeight = 0;
3037 unsigned int outputWidth = 0;
3041 if (paddingString ==
"SAME")
3045 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
3046 static_cast<float>(pooling2dDescriptor.
m_StrideY)));
3047 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
3048 static_cast<float>(pooling2dDescriptor.
m_StrideX)));
3050 else if (paddingString ==
"VALID")
3054 outputHeight =
static_cast<uint32_t
>(ceil(
3055 static_cast<float>(inputHeight - pooling2dDescriptor.
m_PoolHeight + 1) /
3056 static_cast<float>(pooling2dDescriptor.
m_StrideY)));
3057 outputWidth =
static_cast<uint32_t
>(ceil(
3058 static_cast<float>(inputWidth - pooling2dDescriptor.
m_PoolWidth + 1) /
3059 static_cast<float>(pooling2dDescriptor.
m_StrideX)));
// Output shape assembly switches on layout (bodies elided).
3064 case DataLayout::NHWC:
3071 case DataLayout::NCHW:
3087 if (layer ==
nullptr)
3092 "Failed to add pooling2d layer for %1% %2%")
3101 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// NOTE(review): extraction-garbled fragment — line numbers fused into text,
// interior lines elided. Code kept byte-identical; only comments added.
// Addition helper: handles both plain Add and BiasAdd (isBiasAdd). BiasAdd
// requires a 1D bias (input 1) and broadcasts it per the node's data_format;
// plain Add broadcasts whichever input has the lower rank (NHWC assumed).
3104 ParsedTfOperationPtr TfParser::AddAdditionLayer(
const tensorflow::NodeDef& nodeDef,
bool isBiasAdd)
3106 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3108 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3109 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3123 "Unsupported bias for BiasAdd. It should be a 1D vector. " 3124 "Got %1% dimensions for input %2%. Node %3% %4%")
3126 % inputs[1].m_IndexedValue->GetNode().name()
3131 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
3134 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat ==
"NHWC", *m_Network, nodeDef);
3140 const bool isNHWC =
true;
3141 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3146 const bool isNHWC =
true;
3147 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
// Output shape is the per-dimension max of the (possibly reshaped) inputs.
3161 std::vector<unsigned int> outputShape;
3167 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3183 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Maximum helper (signature head elided): broadcasts the lower-rank input,
// then picks the output TensorInfo from the higher-rank side.
3188 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3191 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3192 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3198 if (input0NumDims < input1NumDims)
3200 const bool isNHWC =
true;
3201 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3203 if (input1NumDims < input0NumDims)
3205 const bool isNHWC =
true;
3206 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3212 if (input0NumDims < input1NumDims)
3221 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// RealDiv helper (signature head elided): same broadcast-lower-rank pattern,
// with the output shape as the per-dimension max of the inputs.
3226 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3228 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3229 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3234 if (input0NumDims < input1NumDims)
3236 const bool isNHWC =
true;
3237 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3239 if (input1NumDims < input0NumDims)
3241 const bool isNHWC =
true;
3242 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3251 std::vector<unsigned int> outputShape;
3258 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3264 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
// Multiplication helper: same broadcast-lower-rank pattern (remainder of the
// function is elided past this fragment).
3267 IConnectableLayer* TfParser::AddMultiplicationLayer(
const tensorflow::NodeDef& nodeDef)
3269 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3272 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3273 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3278 if (input0NumDims < input1NumDims)
3280 const bool isNHWC =
true;
3281 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3283 if (input1NumDims < input0NumDims)
3285 const bool isNHWC =
true;
3286 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3292 if (input0NumDims < input1NumDims)
3303 IConnectableLayer* TfParser::AddFullyConnectedLayer(
const tensorflow::NodeDef& matMulNodeDef,
3304 const tensorflow::NodeDef* addNodeDef,
const char* armnnLayerName)
3307 ParsedConstTfOperation<float>* biasNode =
nullptr;
3308 if (addNodeDef !=
nullptr)
3310 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
3312 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3314 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3316 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3318 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3325 "ArmNN only supports fully connected layers with constant bias. " 3326 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
3327 % addInputs[0].m_IndexedValue->GetNode().name()
3328 % addInputs[1].m_IndexedValue->GetNode().name()
3329 % addNodeDef->name()
3330 % matMulNodeDef.name()
3336 ParsedConstTfOperation<float>* weightNode =
nullptr;
3337 ParsedTfOperation* inputNode =
nullptr;
3338 unsigned int inputIdx = 0;
3339 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3340 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3342 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3343 inputNode = mulInputs[1].m_IndexedValue;
3344 inputIdx = mulInputs[1].m_Index;
3346 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3348 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3349 inputNode = mulInputs[0].m_IndexedValue;
3350 inputIdx = mulInputs[0].m_Index;
3357 "ArmNN only supports fully connected layers with constant weights. " 3358 "Inputs %1% and %2%. MatMulNode %3% %4%")
3359 % mulInputs[0].m_IndexedValue->GetNode().name()
3360 % mulInputs[1].m_IndexedValue->GetNode().name()
3361 % matMulNodeDef.name()
3365 std::vector<float> weightTensorData;
3367 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
3374 std::vector<float> biasTensorData;
3376 if (addNodeDef !=
nullptr)
3378 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
3385 "Shape of matmul weights and bias do not match. " 3386 "AddNode %1%. MatMulNode %2% %3%")
3387 % addNodeDef->name()
3388 % matMulNodeDef.name()
3396 BOOST_ASSERT(layer !=
nullptr);
3398 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3399 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3403 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3407 void TfParser::LoadNodeDef(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
3411 if (nodeDef.attr().count(
"T") != 0)
3413 auto attr = nodeDef.attr().at(
"T");
3416 else if (nodeDef.attr().count(
"dtype") != 0)
3418 auto attr = nodeDef.attr().at(
"dtype");
3422 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() !=
"Const")
3427 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). " 3428 "Got %1% for Node %2% %3%")
3429 % tensorflow::DataType_Name(type)
3434 const std::string& operation = nodeDef.op();
3435 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3436 if (itControlInput != m_ControlInputs.end())
3441 auto it = ms_OperationNameToParsingFunctions.find(operation);
3442 if (it != ms_OperationNameToParsingFunctions.end())
3444 auto func = it->second;
3446 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3449 auto it = m_ParsedTfOperations.find(nodeDef.name());
3450 if (it != m_ParsedTfOperations.end())
3452 throw ParseException(boost::str(boost::format(
"Name %1% used by more than one node") % nodeDef.name()));
3454 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3457 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3458 m_RequestedOutputs.end())
3460 auto outId = ParseOutputId(nodeDef.name());
3462 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3464 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3470 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3478 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3484 void TfParser::LoadGraphDef(
const tensorflow::GraphDef& graphDef)
3487 m_NodesByName.clear();
3488 m_NetworkInputsBindingInfo.clear();
3489 m_NetworkOutputsBindingInfo.clear();
3491 for (
int i = 0; i < graphDef.node_size(); ++i)
3493 const tensorflow::NodeDef& node = graphDef.node(i);
3494 m_NodesByName[node.name()] = &node;
3498 for (
const auto& pair : m_InputShapes)
3500 const std::string& requestedInputName = pair.first;
3501 auto nodeIt = m_NodesByName.find(requestedInputName);
3502 if (nodeIt == m_NodesByName.end())
3507 "Couldn't find requested input node '%1%' in graph %2%")
3508 % requestedInputName
3514 std::vector<const tensorflow::NodeDef*> targetNodes;
3515 for (
const std::string& requestedOutputName : m_RequestedOutputs)
3517 auto nodeIt = m_NodesByName.find(requestedOutputName);
3518 if (nodeIt == m_NodesByName.end())
3523 "Couldn't find requested output node '%1%' in graph %2%")
3524 % requestedOutputName
3527 targetNodes.push_back(nodeIt->second);
3531 std::vector<const tensorflow::NodeDef*> sortedNodes;
3532 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3534 [
this](
const tensorflow::NodeDef* node)
3536 auto outputs = GetTfInputNodes(*node);
3537 std::vector<const tensorflow::NodeDef*> nodesOnly;
3538 for (
const auto & o : outputs) {
3539 nodesOnly.push_back(o.m_IndexedValue);
3548 "Cycle detected in graph %1%")
3553 for (
const auto& it : sortedNodes)
3555 const tensorflow::NodeDef& currentNode = *it;
3556 LoadNodeDef(currentNode, graphDef);
3561 const std::map<std::string, TensorShape>& inputShapes,
3562 const std::vector<std::string>& requestedOutputs)
3564 FILE* fd = fopen(graphFile,
"r");
3571 "Graph file %1% failed to open %2%")
3577 tensorflow::GraphDef graphDef;
3578 auto input =
new google::protobuf::io::FileInputStream(fileno(fd));
3579 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3588 "Failed to parse graph file %1%")
3592 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3596 const std::map<std::string, TensorShape>& inputShapes,
3597 const std::vector<std::string>& requestedOutputs)
3600 tensorflow::GraphDef graphDef;
3601 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3608 "Failed to parse graph file %1%")
3612 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3616 const std::map<std::string, TensorShape>& inputShapes,
3617 const std::vector<std::string>& requestedOutputs)
3619 FILE* fd = fopen(graphFile,
"rb");
3626 "Graph file %1% failed to open %2%")
3632 tensorflow::GraphDef graphDef;
3634 google::protobuf::io::FileInputStream inStream(fileno(fd));
3635 google::protobuf::io::CodedInputStream codedStream(&inStream);
3636 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3637 bool success = graphDef.ParseFromCodedStream(&codedStream);
3645 "Failed to parse protobuf file %1% %2%")
3650 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3653 INetworkPtr TfParser::CreateNetworkFromGraphDef(
const tensorflow::GraphDef& graphDef,
3654 const std::map<std::string, TensorShape>& inputShapes,
3655 const std::vector<std::string>& requestedOutputs)
3657 m_Network = INetwork::Create();
3659 m_InputShapes = inputShapes;
3660 if (requestedOutputs.size() == 0)
3665 "requestedOutputs must have at least one entry %1%")
3668 m_RequestedOutputs = requestedOutputs;
3672 LoadGraphDef(graphDef);
3682 return std::move(m_Network);
3685 void TfParser::Cleanup()
3688 m_InputShapes.clear();
3689 m_RequestedOutputs.clear();
3690 m_NodesByName.clear();
3691 m_ParsedTfOperations.clear();
3696 return GetBindingInfo(name,
"input", m_NetworkInputsBindingInfo);
3701 return GetBindingInfo(name,
"output", m_NetworkOutputsBindingInfo);
3704 std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(
const std::string& layerName,
3705 const char* bindingPointDesc,
3706 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3708 auto it = nameToBindingInfo.find(layerName);
3709 if (it == nameToBindingInfo.end())
3714 "Unknown %1% '%2%' %3%")
3724 return TrackBindingPoint(layer,
id, tensorInfo,
"input", m_NetworkInputsBindingInfo);
3729 return TrackBindingPoint(layer,
id, tensorInfo,
"output", m_NetworkOutputsBindingInfo);
3735 const char* bindingPointDesc,
3736 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3738 const std::string layerName = layer->
GetName();
3739 auto it = nameToBindingInfo.find(layerName);
3740 if (it == nameToBindingInfo.end())
3742 nameToBindingInfo[layerName] = std::make_pair(
id, tensorInfo);
3749 "Id %1% used by more than one %2% layer %3%")
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ITfParser, void(*)(ITfParser *parser)> ITfParserPtr
virtual IConnectableLayer * AddGatherLayer(const char *name=nullptr)=0
Add Gather layer to the network.
virtual IConnectableLayer * AddComparisonLayer(const ComparisonDescriptor &comparisonDescriptor, const char *name=nullptr)=0
Add a Comparison layer to the network.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
virtual IConnectableLayer * AddMeanLayer(const MeanDescriptor &meanDescriptor, const char *name=nullptr)=0
Add a Mean layer to the network.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
unsigned int GetWidthIndex() const
float m_K
Kappa value used for the across channel normalization equation.
friend class ParsedMulTfOperation
WithOutputTensorIndex< ParsedTfOperation * > OutputOfParsedTfOperation
const TensorShape & GetShape() const
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
armnn::BindingPointInfo BindingPointInfo
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
A ReshapeDescriptor for the ReshapeLayer.
WithOutputTensorIndex< std::string > OutputId
std::vector< int > m_Begin
Begin values for the input that will be sliced.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual IConnectableLayer * AddSoftmaxLayer(const SoftmaxDescriptor &softmaxDescriptor, const char *name=nullptr)=0
Adds a softmax layer to the network.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
TensorShape m_InputShape
Required shape of all input tensors.
virtual IConnectableLayer * AddMinimumLayer(const char *name=nullptr)=0
Add a Minimum layer to the network.
uint32_t m_PoolWidth
Pooling width value.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
virtual IConnectableLayer * AddPadLayer(const PadDescriptor &padDescriptor, const char *name=nullptr)=0
Adds a pad layer to the network.
void CalculateReducedOutputTensoInfo(const armnn::TensorInfo &inputTensorInfo, const std::set< unsigned int > &axisSet, bool keepDims, armnn::TensorInfo &outputTensorInfo)
Creates a tensor info after reducing the dimensions mentioned in axisData.
const TensorShape & GetShape() const
unsigned int GetNumBytes() const
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
virtual IConnectableLayer * AddActivationLayer(const ActivationDescriptor &activationDescriptor, const char *name=nullptr)=0
Adds an activation layer to the network.
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual IConnectableLayer * AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor &elementwiseUnaryDescriptor, const char *name=nullptr)=0
Add an ElementwiseUnary layer to the network.
unsigned int CheckPaddingTensor(const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string &name) const override
Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.
Main network class which provides the interface for building up a neural network. ...
virtual IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)=0
Adds a batch normalization layer to the network.
uint32_t m_PadTop
Padding top value in the height dimension.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
int32_t m_BeginMask
Begin mask value.
int32_t m_EndMask
End mask value.
friend class ParsedConstTfOperation
const armnn::PermutationVector NHWCToArmNN
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
virtual IConnectableLayer * AddNormalizationLayer(const NormalizationDescriptor &normalizationDescriptor, const char *name=nullptr)=0
Adds a normalization layer to the network.
virtual IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)=0
Adds a fully connected layer to the network.
unsigned int GetHeightIndex() const
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
void SetShape(const TensorShape &newShape)
A ResizeDescriptor for the ResizeLayer.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
virtual IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)=0
Adds a 2D convolution layer to the network.
TensorShape m_TargetShape
Target shape value.
virtual IConnectableLayer * AddOutputLayer(LayerBindingId id, const char *name=nullptr)=0
Adds an output layer to the network.
std::unique_ptr< ParsedTfOperation > ParsedTfOperationPtr
virtual IConnectableLayer * AddAdditionLayer(const char *name=nullptr)=0
Adds an addition layer to the network.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
A PadDescriptor for the PadLayer.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
virtual IConnectableLayer * AddConcatLayer(const ConcatDescriptor &concatDescriptor, const char *name=nullptr)=0
Adds a concatenation layer to the network.
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual IConnectableLayer * AddStackLayer(const StackDescriptor &descriptor, const char *name=nullptr)=0
Adds a stack layer to the network.
WithOutputTensorIndex< const tensorflow::NodeDef * > OutputOfConstNodeDef
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
virtual IConnectableLayer * AddResizeLayer(const ResizeDescriptor &resizeDescriptor, const char *name=nullptr)=0
Adds a resize layer to the network.
int32_t m_NewAxisMask
New axis mask value.
virtual IConnectableLayer * AddPooling2dLayer(const Pooling2dDescriptor &pooling2dDescriptor, const char *name=nullptr)=0
Adds a pooling layer to the network.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
An output connection slot for a layer.
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual IConnectableLayer * AddSplitterLayer(const ViewsDescriptor &splitterDescriptor, const char *name=nullptr)=0
Adds a splitter layer to the network.
bool m_BiasEnabled
Enable/disable bias.
WithOutputTensorIndex wraps a value and an index.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
uint32_t m_TargetWidth
Target width value.
virtual armnn::INetworkPtr CreateNetworkFromString(const char *protoText, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network directly from protobuf text in a string. Useful for debugging/testing.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
friend class ParsedMatMulTfOperation
void CalculateSamePadding(uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
void SetDataType(DataType type)
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_TargetHeight
Target height value.
virtual IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)=0
Adds a 2D depthwise convolution layer to the network.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CalculateStridedSliceOutputTensorInfo(const armnn::TensorInfo &inputTensorInfo, const armnn::StridedSliceDescriptor &desc, armnn::TensorInfo &outputTensorInfo)
Create output tensor info for a StridedSlice operator.
std::vector< int > m_End
End values for the input that will be sliced.
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
virtual IConnectableLayer * AddStridedSliceLayer(const StridedSliceDescriptor &stridedSliceDescriptor, const char *name=nullptr)=0
Adds a strided slice layer to the network.
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional class instance.
virtual IConnectableLayer * AddReshapeLayer(const ReshapeDescriptor &reshapeDescriptor, const char *name=nullptr)=0
Adds a reshape layer to the network.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Parses a directed acyclic graph from a tensorflow protobuf file.
virtual IConnectableLayer * AddMaximumLayer(const char *name=nullptr)=0
Add a Maximum layer to the network.
unsigned int GetNumDimensions() const
virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network from a protobuf text file on the disk.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
const char * GetName() const override
Returns the name of the layer.
virtual const char * GetName() const =0
Returns the name of the layer.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING)
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string &name) const override
Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const armnn::PermutationVector ArmNNToNHWC
unsigned int GetNumDimensions() const
virtual IConnectableLayer * AddDivisionLayer(const char *name=nullptr)=0
Adds a division layer to the network.
virtual IConnectableLayer * AddInputLayer(LayerBindingId id, const char *name=nullptr)=0
Adds an input layer to the network.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network from a protobuf binary file on the disk.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
constexpr unsigned int GetDataTypeSize(DataType dataType)
virtual IConnectableLayer * AddTransposeLayer(const TransposeDescriptor &transposeDescriptor, const char *name=nullptr)=0
Adds a transpose layer to the network.
TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
uint32_t m_PadRight
Padding right value in the width dimension.
virtual IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr)=0
Adds a multiplication layer to the network.
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
virtual IConnectableLayer * AddSubtractionLayer(const char *name=nullptr)=0
Adds a subtraction layer to the network.
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)