17 #include <google/protobuf/io/zero_copy_stream_impl.h> 18 #include <google/protobuf/text_format.h> 20 #include <tensorflow/core/framework/graph.pb.h> 22 #include <boost/format.hpp> 23 #include <boost/core/ignore_unused.hpp> 24 #include <boost/format.hpp> 25 #include <boost/numeric/conversion/cast.hpp> 26 #include <boost/polymorphic_cast.hpp> 31 using namespace armnn;
42 template <
typename Callable>
43 void ReadMandatoryNodeAttributeImpl(
const tensorflow::NodeDef& nodeDef,
44 const std::string& attribName,
45 tensorflow::AttrValue::ValueCase expectedValueCase,
48 auto iter = nodeDef.attr().find(attribName);
49 if (iter != nodeDef.attr().end())
51 const auto& attrValue = iter->second;
52 if (attrValue.value_case() == expectedValueCase)
61 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, " 62 "but found %4% instead %5%")
65 %
static_cast<int>(expectedValueCase)
66 % static_cast<int>(attrValue.value_case())
75 "Could not find required attribute %1% in node %2% %3%")
82 template <
typename Callable>
83 void ReadOptionalNodeAttributeImpl(
const tensorflow::NodeDef& nodeDef,
84 const std::string& attribName,
85 tensorflow::AttrValue::ValueCase expectedValueCase,
88 auto iter = nodeDef.attr().find(attribName);
89 if (iter != nodeDef.attr().end())
91 const auto& attrValue = iter->second;
92 if (attrValue.value_case() == expectedValueCase)
101 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, " 102 "but found %4% instead %5%")
105 %
static_cast<int>(expectedValueCase)
106 % static_cast<int>(attrValue.value_case())
112 float ReadMandatoryNodeFloatAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
114 float attribValue = 0.0f;
115 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
116 [&attribValue](
const tensorflow::AttrValue& attrValue)
118 attribValue = attrValue.f();
123 int32_t ReadMandatoryNodeInt32Attribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
125 int32_t attribValue = 0u;
126 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
127 [&attribValue](
const tensorflow::AttrValue& attrValue)
129 attribValue =
static_cast<int32_t
>(attrValue.i());
134 bool ReadMandatoryNodeBoolAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
136 bool attribValue =
false;
137 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
138 [&attribValue](
const tensorflow::AttrValue& attrValue)
140 attribValue =
static_cast<bool>(attrValue.b());
145 uint32_t ReadMandatoryNodeUint32Attribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
147 uint32_t attribValue = 0u;
148 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
149 [&attribValue](
const tensorflow::AttrValue& attrValue)
151 attribValue =
static_cast<uint32_t
>(attrValue.i());
156 std::string ReadMandatoryNodeStringAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
158 std::string attribValue =
"";
159 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
160 [&attribValue](
const tensorflow::AttrValue& attrValue)
162 attribValue = attrValue.s();
167 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(
const tensorflow::NodeDef& nodeDef,
168 const std::string& name)
170 std::vector<uint32_t> attriList;
171 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
172 [&attriList](
const tensorflow::AttrValue& attrValue)
174 for (
int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
176 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
183 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(
const tensorflow::NodeDef& nodeDef,
184 const std::string& name)
186 std::vector<uint32_t> attriList;
187 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
188 [&attriList](
const tensorflow::AttrValue& attrValue)
190 for (
int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
192 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
199 std::string ReadOptionalNodeStringAttribute(
const tensorflow::NodeDef& nodeDef,
200 const std::string& name,
201 const std::string& defaultValue =
"")
203 std::string attribValue = defaultValue;
204 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
205 [&attribValue](
const tensorflow::AttrValue& attrValue)
207 attribValue = attrValue.s();
212 bool ReadOptionalNodeBoolAttribute(
const tensorflow::NodeDef& nodeDef,
213 const std::string& name,
214 bool defaultValue =
false)
216 bool attribValue = defaultValue;
217 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
218 [&attribValue](
const tensorflow::AttrValue& attrValue)
220 attribValue = attrValue.b();
225 tensorflow::DataType ReadMandatoryNodeTypeAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
228 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
229 [&attribValue](
const tensorflow::AttrValue& attrValue)
231 attribValue = attrValue.type();
238 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
239 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
241 if (stretchDim != targetDims.end())
243 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
248 "At most one component of shape can be -1 %1%")
252 auto targetNumElements =
253 boost::numeric_cast<
unsigned int>(
254 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
255 auto stretchIndex =
static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
256 outDims[stretchIndex] = input.
GetNumElements() / targetNumElements;
260 reshapeInfo.
SetShape(
TensorShape{
static_cast<unsigned int>(outDims.size()), outDims.data() });
267 INetwork& m_Network,
const tensorflow::NodeDef& nodeDef)
271 const unsigned int matchDim = inputTensorInfo.
GetNumDimensions() - (isNHWC ? 1 : 3);
272 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
273 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.
GetNumDimensions(), 1);
274 reshapedDimensions[matchDim] = input1Info.
GetShape()[0];
279 const std::string reshapeLayerName =
"reshape_for-" + nodeDef.name();
292 OutputId ParseOutputId(
const std::string & name)
294 unsigned int outputNum = 0;
295 size_t colonPos = name.find_last_of(
":");
296 if (colonPos != std::string::npos)
298 int n = std::stoi(name.substr(colonPos+1));
304 "Output tensor id is out of range for %1% %2%")
308 outputNum =
static_cast<unsigned int>(n);
310 return OutputId(name.substr(0,colonPos),outputNum);
313 #define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \ 314 if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \ 316 throw ParseException( \ 319 "Unsupported data format %1% passed for %2% node %3%. " \ 320 "Only NHWC and NCHW supported %4%") \ 324 % CHECK_LOCATION().AsString())); \ 327 #define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \ 328 if(PADDING != "SAME" && PADDING != "VALID" ) \ 330 throw ParseException( \ 333 "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \ 336 % CHECK_LOCATION().AsString())); \ 341 const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
342 {
"Const", &TfParser::ParseConst },
343 {
"Add", &TfParser::ParseAdd },
344 {
"AddN", &TfParser::ParseAddN },
345 {
"BiasAdd", &TfParser::ParseBiasAdd },
346 {
"Identity", &TfParser::ParseIdentity },
347 {
"Conv2D", &TfParser::ParseConv2D },
348 {
"DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
349 {
"ExpandDims", &TfParser::ParseExpandDims },
350 {
"FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
351 {
"Gather", &TfParser::ParseGather},
352 {
"Greater", &TfParser::ParseGreater},
353 {
"ConcatV2", &TfParser::ParseConcat },
354 {
"LRN", &TfParser::ParseLrn },
355 {
"MatMul", &TfParser::ParseMatMul },
356 {
"Mean", &TfParser::ParseMean },
357 {
"Mul", &TfParser::ParseMul },
358 {
"Placeholder", &TfParser::ParsePlaceholder },
359 {
"RealDiv", &TfParser::ParseRealDiv },
360 {
"Relu", &TfParser::ParseRelu },
361 {
"Relu6", &TfParser::ParseRelu6 },
362 {
"Reshape", &TfParser::ParseReshape },
363 {
"ResizeBilinear", &TfParser::ParseResizeBilinear },
364 {
"Rsqrt", &TfParser::ParseRsqrt },
365 {
"Shape", &TfParser::ParseShape },
366 {
"Squeeze", &TfParser::ParseSqueeze },
367 {
"Sigmoid", &TfParser::ParseSigmoid },
368 {
"Softmax", &TfParser::ParseSoftmax },
369 {
"Softplus", &TfParser::ParseSoftplus },
370 {
"Split", &TfParser::ParseSplit },
371 {
"StridedSlice", &TfParser::ParseStridedSlice },
372 {
"Tanh", &TfParser::ParseTanh },
373 {
"MaxPool", &TfParser::ParseMaxPool },
374 {
"AvgPool", &TfParser::ParseAvgPool },
375 {
"Maximum", &TfParser::ParseMaximum },
376 {
"Minimum", &TfParser::ParseMinimum },
377 {
"Equal", &TfParser::ParseEqual },
378 {
"Pad", &TfParser::ParsePad },
379 {
"Sub", &TfParser::ParseSub },
380 {
"Pack" , &TfParser::ParseStack },
381 {
"Stack", &TfParser::ParseStack }
384 const std::list<std::string> TfParser::m_ControlInputs = {
// Computes TensorFlow SAME-scheme padding for one spatial dimension.
// With samePadding == false both outputs are zero (VALID scheme).
// Otherwise the total padding is split with the smaller half in front.
void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                          uint32_t filterSize, bool samePadding,
                          uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (samePadding) {
        // SAME output size: ceil(inputSize / stride).
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize) {
            *paddingFront = (temp - inputSize) / 2;
            *paddingBack = (temp - inputSize) - *paddingFront;
        }
    }
}
419 void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
428 class ParsedTfOperation
431 ParsedTfOperation(
TfParser* parser,
const tensorflow::NodeDef& node)
437 virtual ~ParsedTfOperation() {};
439 const tensorflow::NodeDef& GetNode()
const {
return m_Node; }
443 virtual IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex) = 0;
446 virtual ParsedTfOperation* ResolveIdentityOperations()
453 const tensorflow::NodeDef& m_Node;
458 class SingleLayerParsedTfOperation :
public ParsedTfOperation
462 : ParsedTfOperation(parser, node)
467 IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 471 unsigned int armnnOutputSlotIdx = tfOutputIndex;
477 "The requested output slot #%1% " 478 "for %2% does not exist %3%")
491 class DeferredSingleLayerParsedTfOperation :
public SingleLayerParsedTfOperation
494 DeferredSingleLayerParsedTfOperation(
TfParser* parser,
const tensorflow::NodeDef& node)
495 : SingleLayerParsedTfOperation(parser, node,
nullptr)
499 IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 503 CreateLayerDeferred();
505 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
509 virtual void CreateLayerDeferred() = 0;
514 : m_Network(nullptr, nullptr)
519 const tensorflow::NodeDef* TfParser::ResolveIdentityNode(
const tensorflow::NodeDef* nodeDef)
521 if (nodeDef->op() !=
"Identity")
526 if (nodeDef->input_size() != 1)
531 "Identity node should have a single input! %1% has %2% inputs %3%")
533 % nodeDef->input_size()
537 auto it = m_NodesByName.find(nodeDef->input(0));
538 if (it != m_NodesByName.end())
540 const tensorflow::NodeDef* inputNode = it->second;
541 return ResolveIdentityNode(inputNode);
548 "Cannot find what the Identity node %1% is linked to! %2%")
554 std::vector<OutputOfConstNodeDef>
555 TfParser::GetTfInputNodes(
const tensorflow::NodeDef& nodeDef)
const 557 std::vector<OutputOfConstNodeDef> ret;
559 if (nodeDef.op() ==
"Const")
565 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
566 for (
int j = 0; j < nodeDef.input_size(); ++j)
568 OutputId outputId = ParseOutputId(nodeDef.input(j));
570 if (nodeDef.input(j)[0] ==
'^')
577 if (inputIt == m_NodesByName.end())
582 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
593 std::vector<OutputOfParsedTfOperation>
594 TfParser::GetInputParsedTfOperationsChecked(
const tensorflow::NodeDef& nodeDef,
595 std::size_t expectedNumInputs)
598 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
599 const std::size_t numInputs = nodes.size();
600 if (numInputs != expectedNumInputs)
605 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
612 std::vector<OutputOfParsedTfOperation> result;
613 for (
auto&& node : nodes)
615 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
616 if (it == m_ParsedTfOperations.end())
621 "Node with name '%1%' has not been parsed %2%")
622 % node.m_IndexedValue->name()
625 ParsedTfOperation* parsedOp = it->second.get();
627 parsedOp = parsedOp->ResolveIdentityOperations();
634 const tensorflow::NodeDef& nodeDef,
637 const std::string& layerName)
644 if (input0Dim != input1Dim)
648 if (input0Dim == 1 && input1Dim == 4)
650 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot,
true, *m_Network, nodeDef);
652 else if (input0Dim == 4 && input1Dim == 1)
654 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot,
true, *m_Network, nodeDef);
660 boost::format(
"Unsupported broadcast configuration for %1% operation %2% %3%")
673 std::vector<unsigned int> outputShape;
680 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
690 const tensorflow::NodeDef& nodeDef,
693 unsigned int numberOfAddition,
694 unsigned long numberOfLayersToConnect,
699 std::string layerName(nodeDef.name());
700 if (isOdd || numberOfLayersToConnect != 2)
703 layerName.append(
"_addN_").append(std::to_string(numberOfAddition));
705 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
709 const tensorflow::NodeDef& nodeDef,
712 unsigned int numberOfAddition)
716 std::string layerName(nodeDef.name());
717 layerName.append(
"_addN_").append(std::to_string(numberOfAddition));
718 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
722 const tensorflow::NodeDef& nodeDef,
728 return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
731 ParsedTfOperationPtr TfParser::ParseAddN(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
733 boost::ignore_unused(graphDef);
734 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef,
"N");
735 if (numberOfInputs < 2)
741 "AddN Node with name '%1%' has less than two (%2) inputs %3%")
743 % std::to_string(numberOfInputs)
746 else if (numberOfInputs == 2)
749 return AddAdditionLayer(nodeDef,
false);
756 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
757 unsigned int numberOfAdditions = 0;
758 std::vector<IConnectableLayer*> layers;
760 for (
unsigned int i = 0; i < numberOfInputs; ++i)
763 bool onSecondItem = i % 2;
768 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
769 layers.push_back(newLayer);
773 std::vector<IConnectableLayer*> layersToConnect(layers);
774 unsigned long numberOfLayersToConnect = layersToConnect.size();
775 bool isOdd = numberOfInputs % 2;
777 while (numberOfLayersToConnect > 1)
780 for (
unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
781 bool onSecondItem = i % 2;
786 layersToConnect[i - 1],
789 numberOfLayersToConnect,
791 layers.push_back(newLayer);
795 layersToConnect = layers;
796 numberOfLayersToConnect = layersToConnect.size();
805 finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
807 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, finalLayer);
811 ParsedTfOperationPtr TfParser::ParseAdd(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
813 boost::ignore_unused(graphDef);
814 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
818 if (inputs[0].m_IndexedValue->GetNode().op() ==
"MatMul" &&
819 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
822 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
823 &nodeDef,nodeDef.name().c_str());
824 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
826 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
827 inputs[1].m_IndexedValue->GetNode().op() ==
"MatMul")
830 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
831 &nodeDef,nodeDef.name().c_str());
832 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
837 return AddAdditionLayer(nodeDef);
841 ParsedTfOperationPtr TfParser::ParseBiasAdd(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
843 boost::ignore_unused(graphDef);
844 return AddAdditionLayer(nodeDef,
true);
848 class ParsedIdentityTfOperation :
public ParsedTfOperation
851 ParsedIdentityTfOperation(
TfParser* parser,
const tensorflow::NodeDef& node, ParsedTfOperation* representative)
852 : ParsedTfOperation(parser, node)
853 , m_Representative(representative)
857 virtual IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 859 BOOST_ASSERT(m_Representative);
860 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
863 virtual ParsedTfOperation* ResolveIdentityOperations()
override 865 return m_Representative->ResolveIdentityOperations();
869 ParsedTfOperation* m_Representative;
872 ParsedTfOperationPtr TfParser::ParseIdentity(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
874 boost::ignore_unused(graphDef);
875 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
877 return std::make_unique<ParsedIdentityTfOperation>(
this, nodeDef, inputs[0].m_IndexedValue);
883 template <
typename T>
888 const T* tensorData,
const TensorInfo& tensorInfo)
889 : DeferredSingleLayerParsedTfOperation(parser, node),
891 m_TensorInfo(tensorInfo)
896 void CreateLayerDeferred()
override 898 BOOST_ASSERT(
m_Layer ==
nullptr);
899 m_Layer = m_Parser->m_Network->AddConstantLayer(
ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
903 ConstTensor GetConstTensor(std::vector<T>& outputTensorData)
const 907 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.
GetNumBytes());
910 ConstTensor constTensor(m_TensorInfo, outputTensorData);
914 const T* GetStorage()
const 916 return m_Storage.data();
926 std::vector<T> m_Storage;
932 const tensorflow::NodeDef& nodeDef)
936 case tensorflow::DT_FLOAT:
937 return DataType::Float32;
939 case tensorflow::DT_INT32:
940 return DataType::Signed32;
946 "Unknown DataType %1% for node %2% %3%")
947 % tensorflow::DataType_Name(tfDataType)
953 struct ParseTfTensorValueList
955 template<
typename DataType>
957 const tensorflow::TensorProto& tfTensor,
958 unsigned int dstElements,
959 std::vector<int8_t>& outputData);
961 template <
typename DataType>
962 static void ReadData(
const void* srcData,
unsigned int numSrcElements,
963 std::vector<int8_t>& dstData,
unsigned int numDstElements)
966 if (numSrcElements == 0)
972 if (numDstElements == 0)
974 numDstElements = numSrcElements;
978 dstData.resize(std::max(numSrcElements, numDstElements) *
sizeof(
DataType));
984 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
986 if (numDstElements > numSrcElements)
989 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
996 void ParseTfTensorValueList::Parse<float>(
const tensorflow::TensorProto& tfTensor,
997 unsigned int dstElements, std::vector<int8_t>& outputData)
999 ReadData<float>(tfTensor.float_val().data(),
static_cast<unsigned int>(tfTensor.float_val_size()),
1000 outputData, dstElements);
1004 void ParseTfTensorValueList::Parse<int32_t>(
const tensorflow::TensorProto& tfTensor,
1005 unsigned int dstElements, std::vector<int8_t>& outputData)
1007 ReadData<int32_t>(tfTensor.int_val().data(),
static_cast<unsigned int>(tfTensor.int_val_size()),
1008 outputData, dstElements);
1011 template <
template<
typename>
class OperatorType,
typename T = int8_t>
1012 struct MakeTfOperation
1014 template<
typename DataType,
class... Args>
1015 inline static std::unique_ptr<OperatorType<DataType>> Parse(
TfParser* parser,
const tensorflow::NodeDef& node,
1018 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
1023 struct MakeTfOperation<ParsedConstTfOperation>
1025 template<
typename DataType,
class... Args>
1026 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(
TfParser* parser,
1027 const tensorflow::NodeDef& node,
const std::vector<int8_t>& tensorData,
const TensorInfo& tensorInfo)
1029 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1030 reinterpret_cast<const DataType*
>(tensorData.data()), tensorInfo);
1034 template <
class FuncType>
1035 struct InvokeParseFunction
1037 template<
class ResType,
class... Args>
1038 inline static ResType Result(
DataType dataType, Args&&... args)
1040 if (dataType == DataType::Float32)
1042 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1044 else if (dataType == DataType::Signed32)
1046 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1052 template<
class... Args>
1053 inline static void Result(
DataType dataType, Args&&... args)
1055 if (dataType == DataType::Float32)
1057 FuncType::template Parse<float>(std::forward<Args>(args)...);
1059 else if (dataType == DataType::Signed32)
1061 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1066 ParsedTfOperationPtr TfParser::ParseConst(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1068 boost::ignore_unused(graphDef);
1069 BOOST_ASSERT(nodeDef.op() ==
"Const");
1071 if (nodeDef.attr().count(
"value") == 0)
1076 "Value not found for Const node - %1% %2%")
1081 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at(
"value").tensor();
1082 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1085 const auto GetDimensionSize = [](
auto& d) {
return d.size(); };
1087 std::vector<unsigned int> dimensionSizes;
1088 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1089 std::back_inserter(dimensionSizes), GetDimensionSize);
1093 unsigned int numElements = 0U;
1095 if (!dimensionSizes.empty())
1097 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1098 1U, std::multiplies<unsigned int>());
1101 std::vector<int8_t> tensorData;
1104 if (tfTensor.tensor_content().empty())
1106 InvokeParseFunction<ParseTfTensorValueList>::Result<
void>(dataType, tfTensor, numElements, tensorData);
1110 if (numElements == 0)
1112 const unsigned int tfNumElements =
1113 static_cast<unsigned int>(tensorData.size()) /
GetDataTypeSize(dataType);
1114 dimensionSizes.push_back(tfNumElements);
1120 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1123 if (numElements == 0)
1128 "No tensor shape found for Const node - %1% %2%")
1135 if (tensorData.empty())
1140 "No tensor data found for Const node - %1% %2%")
1145 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1146 dimensionSizes.data(),
1151 if (tensorData.size() > tensorInfo.GetNumBytes())
1156 "Number of elements (%1%) should be less than or equal " 1157 "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
1159 % tensorInfo.GetNumElements()
1164 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1165 dataType,
this, nodeDef, tensorData, tensorInfo);
1168 template<
typename Type>
1169 bool TfParser::HasParsedConstTensor(
const std::string & nodeName)
const 1171 auto it = m_ParsedTfOperations.find(nodeName);
1172 if (it == m_ParsedTfOperations.end())
1176 return dynamic_cast<ParsedConstTfOperation<Type>*
>(it->second.get()) !=
nullptr;
1179 template<
typename Type>
1180 bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr)
const 1182 return dynamic_cast<ParsedConstTfOperation<Type>*
>(parsedTfOpPtr) !=
nullptr;
1185 unsigned int TfParser::GetConstInputIndex(
const std::vector<OutputOfParsedTfOperation>& inputs)
1187 for (
unsigned int i = 0; i < inputs.size(); i++)
1189 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1197 "ArmNN only supports operators with constant axis. %1%")
1203 const tensorflow::GraphDef& graphDef)
1205 boost::ignore_unused(graphDef);
1206 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1207 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1210 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1215 "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
1217 % inputs[1].m_IndexedValue->GetNode().name()
1220 ParsedConstTfOperation<float>* weightNode =
1221 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1223 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
1224 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
1225 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
1228 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef,
"dilations");
1229 if (!dilations.empty())
1231 for (
auto dilation : dilations)
1238 "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
1250 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1267 dataLayout == DataLayout::NHWC ?
1268 std::initializer_list<unsigned int>{ 1, 2, 3, 0 } :
1269 std::initializer_list<unsigned int>{ 2, 3, 1, 0 };
1272 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1276 std::vector<float> weightTensorSwizzledData(weightTensorInfo.
GetNumElements());
1278 weightNode->GetStorage(), weightTensorSwizzledData.data(),
sizeof(float));
1281 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1286 bool padding =
false;
1288 unsigned int outputHeight = 0;
1289 unsigned int outputWidth = 0;
1293 if (paddingString ==
"SAME")
1297 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
1299 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
1302 else if (paddingString ==
"VALID")
1306 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1308 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1314 case DataLayout::NHWC:
1321 case DataLayout::NCHW:
1337 nodeDef.name().c_str());
1341 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1345 const tensorflow::GraphDef& graphDef)
1347 boost::ignore_unused(graphDef);
1348 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1349 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1352 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1357 "ArmNN only supports Depthwise Convolution layer with constant weights. " 1358 "Non const input found %1% for node %2% %3%")
1359 % inputs[1].m_IndexedValue->GetNode().name()
1364 ParsedConstTfOperation<float>* weightNode =
1365 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1367 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
1368 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
1369 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
1376 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1394 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1398 std::vector<float> weightTensorSwizzledData(weightTensorInfo.
GetNumElements());
1400 weightNode->GetStorage(), weightTensorSwizzledData.data(),
sizeof(float));
1403 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1405 uint32_t weightHeight = weightTensor.
GetShape()[2];
1406 uint32_t weightWidth = weightTensor.
GetShape()[3];
1408 bool padding =
false;
1410 unsigned int outputHeight = 0;
1411 unsigned int outputWidth = 0;
1415 if (paddingString ==
"SAME")
1419 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
1421 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
1424 else if (paddingString ==
"VALID")
1428 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1430 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1436 case DataLayout::NHWC:
1443 case DataLayout::NCHW:
1459 nodeDef.name().c_str());
1463 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1468 BOOST_ASSERT(nodeDef.op() ==
"ExpandDims");
1474 "Unsupported number of dimensions: %1% for input shape for ExpandDims %2% %3%")
1480 std::int32_t expandDim = ReadMandatoryNodeInt32Attribute(nodeDef,
"Tdim");
1482 std::int32_t inputDimSize = boost::numeric_cast<int32_t>(inputTensorInfo.
GetNumDimensions());
1483 std::vector<uint32_t> outputDims;
1486 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1490 auto currentDimension = inputTensorInfo.
GetShape()[i];
1491 outputDims.push_back(currentDimension);
1497 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1498 outputDims.insert(getPosition, 1);
1505 int outputDimSize = boost::numeric_cast<
int>(outputDims.size() + 1);
1506 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1507 outputDims.insert(getPosition, 1);
1515 "Cannot expand dimension %1% in input tensor with %2% dimension %3%")
1521 if (outputDims.size() > 4)
1526 "Unsupported number of dimensions: %1% for output shape for ExpandDims %2% %3%")
1538 return outTensorInfo;
1541 ParsedTfOperationPtr TfParser::ParseExpandDims(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1543 boost::ignore_unused(graphDef);
1544 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1546 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1558 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1562 const tensorflow::GraphDef& graphDef)
1564 boost::ignore_unused(graphDef);
1565 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1567 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1572 "ArmNN only supports FusedBatchNormalization layers with constant scale. " 1573 "Input %1%. Node %2% %3%")
1574 % inputs[1].m_IndexedValue->GetNode().name()
1578 ParsedConstTfOperation<float>* scaleNode =
1579 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1581 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1586 "ArmNN only supports FusedBatchNormalization layers with constant offset. " 1587 "Input %1%. Node %2% %3%")
1588 % inputs[2].m_IndexedValue->GetNode().name()
1592 ParsedConstTfOperation<float>* offsetNode =
1593 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1595 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1600 "ArmNN only supports FusedBatchNormalization layers with constant mean. " 1601 "Input %1%. Node %2% %3%")
1602 % inputs[3].m_IndexedValue->GetNode().name()
1606 ParsedConstTfOperation<float>* meanNode =
1607 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1609 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1614 "ArmNN only supports FusedBatchNormalization layers with constant variance. " 1615 "Input %1%. Node %2% %3%")
1616 % inputs[4].m_IndexedValue->GetNode().name()
1620 ParsedConstTfOperation<float>* varianceNode =
1621 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1623 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef,
"data_format",
"NHWC");
1628 desc.
m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef,
"epsilon");
1629 desc.
m_DataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1633 std::vector<float> scaleTensorData;
1634 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
1636 std::vector<float> offsetTensorData;
1637 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
1639 std::vector<float> meanTensorData;
1640 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
1642 std::vector<float> varianceTensorData;
1643 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
1650 nodeDef.name().c_str());
1652 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1657 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1660 bool TfParser::IsSupportedLeakyReluPattern(
const tensorflow::NodeDef& mulNodeDef,
1661 size_t alphaLayerIndex,
1666 const tensorflow::NodeDef& otherNodeDef = otherOp.
m_IndexedValue->GetNode();
1675 if (mulNodeDef.op() ==
"Mul")
1677 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1678 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1680 BOOST_ASSERT(inputs.size() == 2);
1681 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1682 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1683 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1685 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1687 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1689 ParsedConstTfOperation<float>* alpha =
1690 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1691 inputs[alphaLayerIndex].m_IndexedValue);
1693 std::vector<float> const_data;
1694 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
1696 if (const_data.size() == 1)
1698 desc.
m_Function = ActivationFunction::LeakyReLu;
1699 desc.
m_A = const_data[0];
1711 const tensorflow::GraphDef& graphDef)
1713 boost::ignore_unused(graphDef);
1714 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1715 if (inputs.size() != 2)
1720 "Maximum expects two inputs!. Got %1% for Node %2% %3%")
1726 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1727 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1740 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1741 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1742 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1743 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1745 BOOST_ASSERT(outputOfLeakyRelu !=
nullptr);
1750 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1756 return AddMaximumLayer(nodeDef);
1760 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1761 const tensorflow::NodeDef& nodeDef,
const std::string& layerName)
1763 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1765 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1766 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1770 if (input0Dim != input1Dim)
1774 if (input0Dim == 1 && input1Dim == 4)
1776 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot,
true, *m_Network, nodeDef);
1778 else if (input0Dim == 4 && input1Dim == 1)
1780 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot,
true, *m_Network, nodeDef);
1786 boost::format(
"Unsupported broadcast configuration for %1% operation %2% %3%")
1792 return {input0Slot, input1Slot};
1799 const tensorflow::NodeDef& nodeDef)
1806 std::vector<unsigned int> outputShape;
1813 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1819 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1826 const tensorflow::NodeDef& nodeDef)
1832 std::vector<unsigned int> outputShape;
1839 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1845 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1849 const tensorflow::GraphDef& graphDef)
1851 boost::ignore_unused(graphDef);
1852 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1853 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1854 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1859 unsigned int outputDim = paramsDim - 1 + indicesDim;
1861 std::vector<unsigned int> dimSizes;
1863 for (
unsigned int i = 0; i < indicesDim; ++i)
1867 for (
unsigned int i = 1; i < paramsDim; ++i)
1879 params.
Connect(layer->GetInputSlot(0));
1880 indices.
Connect(layer->GetInputSlot(1));
1882 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1886 const tensorflow::GraphDef& graphDef)
1888 boost::ignore_unused(graphDef);
1889 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef,
"Greater");
1896 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
1900 const tensorflow::GraphDef& graphDef)
1902 boost::ignore_unused(graphDef);
1903 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef,
"Equal");
1910 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
1914 const tensorflow::GraphDef& graphDef)
1916 boost::ignore_unused(graphDef);
1917 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef,
"Minimum");
1923 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1926 ParsedTfOperationPtr TfParser::ParseSub(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1928 boost::ignore_unused(graphDef);
1929 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1931 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1932 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1939 const bool isNHWC =
true;
1940 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1945 const bool isNHWC =
true;
1946 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1963 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1966 ParsedTfOperationPtr TfParser::ParseStack(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
1968 boost::ignore_unused(graphDef);
1969 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1971 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
1977 "Pack/Stack expects at least one input. Got %1% for Node %2% %3%")
1983 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1985 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1990 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef,
"axis");
1991 const int sNumDimensions = (
static_cast<int>(numDimensions) + 1);
1992 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
1997 "Axis index is not in range. Got %1% for Node %2% %3%")
2005 axis =
static_cast<int32_t
>(numDimensions) + axis + 1;
2009 stackDescriptor.
m_Axis =
static_cast<uint32_t
>(axis);
2010 stackDescriptor.
m_NumInputs =
static_cast<uint32_t
>(numInputs);
2013 const unsigned int supportedNumDims = 4;
2014 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2016 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2025 "The number of dimensions: %1% for input tensors of the " 2026 "Pack/Stack op. Number of dimensions should be less than %2% %3%")
2033 std::vector<unsigned int> outputDimensions;
2036 outputDimensions.push_back(stackDescriptor.
m_InputShape[i]);
2038 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
2043 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2045 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2051 outputDimensions.data(),
2054 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2059 const std::string& nodeName)
2061 unsigned int rank = paddingTensor.
GetShape()[0];
2063 if (rank != expectedRank)
2068 "Expected the padding tensor to be of rank %1 not %2 on Node %3 %4.")
2074 unsigned int second = paddingTensor.
GetShape()[1];
2080 "Expected the padding tensor to be of dimensions [%1, 2] not [%1, %2] on Node %3 %4.")
2090 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2093 std::vector<unsigned int> outDims;
2094 for (
unsigned int i = 0; i < numDims; ++i)
2096 unsigned int dimSize = inputTensorInfo.
GetShape()[i];
2097 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2098 dimSize += dimPadding.first;
2099 dimSize += dimPadding.second;
2100 outDims.push_back(dimSize);
2102 TensorInfo paddedTensorInfo = inputTensorInfo;
2103 unsigned int outDimsSize =
static_cast<unsigned int>(outDims.size());
2105 return paddedTensorInfo;
2109 const tensorflow::GraphDef& graphDef)
2111 boost::ignore_unused(graphDef);
2115 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2116 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2118 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2123 "ArmNN only supports Pad with constant padding. " 2124 "Input %1%. Node %2% %3%")
2125 % inputs[1].m_IndexedValue->GetNode().name()
2130 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2131 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2133 std::vector<int32_t> paddingTensorData;
2134 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
2141 std::vector<std::pair<unsigned int, unsigned int>> padList;
2142 unsigned int rank =
CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2143 for (
unsigned int i = 0; i < rank; ++i)
2145 std::pair<unsigned int, unsigned int> paddingForDim;
2146 for (
unsigned int j = 0; j < 2; j++)
2148 unsigned int index = (i * 2) + j;
2149 int paddingAmount = paddingTensorData[index];
2151 if (paddingAmount < 0)
2156 "Negative amount %1 specified at [%2, %3] of padding tensor on Node %4 %5.")
2165 paddingForDim.first =
static_cast<unsigned int>(paddingAmount);
2169 paddingForDim.second =
static_cast<unsigned int>(paddingAmount);
2172 padList.push_back(paddingForDim);
2180 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2184 const tensorflow::GraphDef& graphDef)
2186 boost::ignore_unused(graphDef);
2187 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2190 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2192 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2195 unsigned int index = GetConstInputIndex(inputs);
2197 ParsedConstTfOperation<int32_t>* shapeNode =
2198 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2200 std::vector<int32_t> axisTensorData;
2201 shapeNode->GetConstTensor(axisTensorData);
2204 const unsigned int concatDim =
static_cast<unsigned int>(axisTensorData[0]);
2207 if (concatDim == 0 || concatDim == 2)
2212 "Dimension %1% for concatenation is not supported by Armnn. " 2219 const unsigned int supportedNumDims = 4;
2220 unsigned int numConcatViews = numInputs - 1;
2221 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
2224 unsigned int mergeDim = 0;
2225 for (
unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2228 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2237 "The number of dimensions: %1% for input tensors of the " 2238 "concatenation op should be %2% %3%")
2245 mergeDims = inputTensorInfo.
GetShape();
2246 unsigned int* viewOrigin =
const_cast<unsigned int*
>(concatDescriptor.
GetViewOrigin(viewIndex));
2247 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
2251 mergeDim += mergeDims[concatDim];
2255 mergeDims[concatDim] = mergeDim;
2260 for (
unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2262 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2266 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2270 const tensorflow::GraphDef& graphDef)
2272 boost::ignore_unused(graphDef);
2280 if (tfDataType != tensorflow::DT_INT32)
2285 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
2286 % tensorflow::DataType_Name(tfDataType)
2291 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2292 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2296 std::vector<int32_t> shapeTensorData;
2297 shapeTensorData.reserve(prevLayerDimensions);
2299 for (
unsigned int i=0; i<prevLayerDimensions; ++i)
2301 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.
GetShape()[i]));
2304 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2306 return std::make_unique<ParsedConstTfOperation<int32_t>>(
this,
2308 &shapeTensorData[0],
2313 const tensorflow::GraphDef& graphDef)
2315 boost::ignore_unused(graphDef);
2316 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2317 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2319 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2324 "ArmNN only supports Reshape layers with constant shapes. " 2325 "Input %1% Node %2% %3%")
2326 % inputs[1].m_IndexedValue->GetNode().name()
2330 ParsedConstTfOperation<int32_t>* shapeNode =
2331 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2333 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2334 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2336 std::vector<int32_t> shapeTensorData;
2337 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
2338 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2348 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2352 const tensorflow::GraphDef& graphDef)
2354 boost::ignore_unused(graphDef);
2355 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2357 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2362 "ArmNN only supports ResizeBilinear layers with constant sizes. " 2363 "Input %1%. Node %2% %3%")
2364 % inputs[1].m_IndexedValue->GetNode().name()
2368 ParsedConstTfOperation<int32_t>* sizeNode =
2369 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2372 if (ReadOptionalNodeBoolAttribute(nodeDef,
"align_corners",
false))
2377 "ArmNN only supports ResizeBilinear layers with align_corners set to false. " 2384 std::vector<int32_t> sizeTensorData;
2385 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
2391 desc.
m_TargetWidth =
static_cast<uint32_t
> (sizeTensorData[1]);
2396 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2400 unsigned int outBatch = inputTensorInfo.
GetShape()[0];
2401 unsigned int outChannels = inputTensorInfo.
GetShape()[3];
2404 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
2411 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2416 BOOST_ASSERT(nodeDef.op() ==
"Squeeze");
2420 if (tfDataType == tensorflow::DT_FLOAT)
2422 type = DataType::Float32;
2424 else if (tfDataType == tensorflow::DT_INT32)
2426 type = DataType::Signed32;
2432 boost::format(
"Unsupported DataType %1% for Squeeze operation %2% %3%")
2433 % tensorflow::DataType_Name(tfDataType)
2444 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
2450 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef,
"squeeze_dims");
2451 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2453 if (squeezeDims.empty())
2455 squeezeDims.assign(dimensionSequence,
2459 std::vector<uint32_t> outputDims;
2462 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2463 auto currentDimension = inputTensorInfo.
GetShape()[i];
2464 if (skipSqueeze || currentDimension != 1)
2466 outputDims.push_back(currentDimension);
2470 if (outputDims.size() > 4)
2475 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
2486 outTensorInfo.SetDataType(type);
2488 return outTensorInfo;
2491 ParsedTfOperationPtr TfParser::ParseSqueeze(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2493 boost::ignore_unused(graphDef);
2494 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2496 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2508 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2511 ParsedTfOperationPtr TfParser::ParseLrn(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2513 boost::ignore_unused(graphDef);
2514 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2517 normalizationDescriptor.
m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2518 normalizationDescriptor.
m_NormChannelType = NormalizationAlgorithmChannel::Across;
2519 normalizationDescriptor.
m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef,
"alpha");
2520 normalizationDescriptor.
m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef,
"beta");
2521 normalizationDescriptor.
m_K = ReadMandatoryNodeFloatAttribute(nodeDef,
"bias");
2522 normalizationDescriptor.
m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef,
"depth_radius");
2528 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2530 nodeDef.name().c_str());
2534 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2546 : DeferredSingleLayerParsedTfOperation(parser, node)
2550 void CreateLayerDeferred()
override 2552 BOOST_ASSERT(
m_Layer ==
nullptr);
2553 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node,
nullptr, m_Node.name().c_str());
2557 ParsedTfOperationPtr TfParser::ParseMatMul(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2559 boost::ignore_unused(graphDef);
2562 return std::make_unique<ParsedMatMulTfOperation>(
this, nodeDef);
2565 ParsedTfOperationPtr TfParser::ParseMean(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2567 boost::ignore_unused(graphDef);
2568 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2569 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2572 if (inputs.size() != 2)
2575 boost::str(boost::format(
"Mean expects two inputs!. Got %1% for Node %2% %3%")
2581 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef,
"keep_dims");
2583 ParsedConstTfOperation<int32_t>* axisNode =
2584 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2586 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2588 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2589 const int* axisData =
static_cast<const int*
>(axisTensor.GetMemoryArea());
2598 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2599 std::set<unsigned int> positiveAxisSet;
2602 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2603 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2604 [rank](
int i) ->
unsigned int {
return static_cast<unsigned int>((i + rank) % rank); });
2610 meanDescriptor.
m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2617 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2629 : DeferredSingleLayerParsedTfOperation(parser, node)
2633 void CreateLayerDeferred()
override 2635 BOOST_ASSERT(
m_Layer ==
nullptr);
2636 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2640 ParsedTfOperationPtr TfParser::ParseMul(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2642 boost::ignore_unused(graphDef);
2644 return std::make_unique<ParsedMulTfOperation>(
this, nodeDef);
2648 const tensorflow::GraphDef& graphDef)
2650 boost::ignore_unused(graphDef);
2652 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2656 auto it = m_InputShapes.find(nodeDef.name());
2657 if (it == m_InputShapes.end())
2662 "Missing input shape for Placeholder '%1%' %2%")
2666 TensorInfo tensorInfo(it->second, DataType::Float32);
2672 TrackInputBinding(layer, layerId, tensorInfo);
2674 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2677 ParsedTfOperationPtr TfParser::ParseRealDiv(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2679 boost::ignore_unused(graphDef);
2680 return AddRealDivLayer(nodeDef);
2684 const tensorflow::GraphDef& graphDef)
2686 boost::ignore_unused(graphDef);
2689 activationDesc.
m_Function = ActivationFunction::ReLu;
2690 return AddActivationLayer(nodeDef, activationDesc);
2694 const tensorflow::GraphDef& graphDef)
2696 boost::ignore_unused(graphDef);
2699 activationDesc.
m_Function = ActivationFunction::BoundedReLu;
2700 activationDesc.
m_A = 6.0f;
2701 activationDesc.
m_B = 0.0f;
2703 return AddActivationLayer(nodeDef, activationDesc);
2707 const tensorflow::GraphDef& graphDef)
2709 boost::ignore_unused(graphDef);
2712 activationDesc.
m_Function = ActivationFunction::Sigmoid;
2714 return AddActivationLayer(nodeDef, activationDesc);
2718 const tensorflow::GraphDef &graphDef)
2720 boost::ignore_unused(graphDef);
2722 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2727 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2731 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2735 const tensorflow::GraphDef& graphDef)
2737 boost::ignore_unused(graphDef);
2739 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2744 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2748 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2752 const tensorflow::GraphDef& graphDef)
2754 boost::ignore_unused(graphDef);
2756 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2757 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2758 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2761 unsigned int index = GetConstInputIndex(inputs);
2763 ParsedConstTfOperation<int32_t>* shapeNode =
2764 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2766 std::vector<int32_t> axisTensorData;
2767 shapeNode->GetConstTensor(axisTensorData);
2770 const unsigned int splitDim =
static_cast<unsigned int>(axisTensorData[0]);
2773 if (splitDim == 0 || splitDim == 2)
2778 "Dimension %1% for split is not supported by Armnn. " 2786 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef,
"num_split");
2788 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
2791 const unsigned int supportedNumDims = 4;
2794 if (inputDimSize != supportedNumDims)
2799 "The number of dimensions: %1% for input tensors of the " 2800 "split op should be %2% %3%")
2806 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2809 for (
unsigned int i = 0; i < inputDimSize; ++i)
2811 splitterDimSizes[i] = inputTensorInfo.
GetShape()[i];
2814 if (splitterDimSizes[splitDim] % num_split != 0)
2816 throw ParseException(
"Number of splits must evenly divide the dimension");
2818 splitterDimSizes[splitDim] /= num_split;
2821 for (
unsigned int g = 0; g < num_split; ++g)
2824 for (
unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2826 splitDesc.
SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2836 splitterDimSizes.data());
2843 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2847 const tensorflow::GraphDef& graphDef)
2849 boost::ignore_unused(graphDef);
2852 activationDesc.
m_Function = ActivationFunction::SoftReLu;
2854 return AddActivationLayer(nodeDef, activationDesc);
2858 const tensorflow::GraphDef& graphDef)
2860 boost::ignore_unused(graphDef);
2862 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2863 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2864 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2866 ParsedConstTfOperation<int32_t>* beginNode =
2867 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
2868 std::vector<int32_t> beginTensorData;
2869 beginNode->GetConstTensor(beginTensorData);
2871 ParsedConstTfOperation<int32_t>* endNode =
2872 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
2873 std::vector<int32_t> endTensorData;
2874 endNode->GetConstTensor(endTensorData);
2876 ParsedConstTfOperation<int32_t>* stridesNode =
2877 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
2878 std::vector<int32_t> stridesTensorData;
2879 stridesNode->GetConstTensor(stridesTensorData);
2882 desc.
m_Begin = beginTensorData;
2883 desc.
m_End = endTensorData;
2885 desc.
m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"begin_mask");
2886 desc.
m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"end_mask");
2887 desc.
m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"ellipsis_mask");
2888 desc.
m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"new_axis_mask");
2889 desc.
m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"shrink_axis_mask");
2893 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2902 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2905 ParsedTfOperationPtr TfParser::ParseTanh(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
2907 boost::ignore_unused(graphDef);
2910 activationDesc.
m_Function = ActivationFunction::TanH;
2911 activationDesc.
m_A = 1.0f;
2912 activationDesc.
m_B = 1.0f;
2914 return AddActivationLayer(nodeDef, activationDesc);
2920 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2924 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2927 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2931 const tensorflow::GraphDef& graphDef)
2933 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2937 const tensorflow::GraphDef& graphDef)
2939 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2945 boost::ignore_unused(graphDef);
2947 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2948 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2951 if (inputs.size() != 1)
2956 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2962 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
2963 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
2964 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
2965 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"ksize");
2973 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2985 bool padding =
false;
2987 unsigned int outputHeight = 0;
2988 unsigned int outputWidth = 0;
2992 if (paddingString ==
"SAME")
2996 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
2997 static_cast<float>(pooling2dDescriptor.
m_StrideY)));
2998 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
2999 static_cast<float>(pooling2dDescriptor.
m_StrideX)));
3001 else if (paddingString ==
"VALID")
3005 outputHeight =
static_cast<uint32_t
>(ceil(
3006 static_cast<float>(inputHeight - pooling2dDescriptor.
m_PoolHeight + 1) /
3007 static_cast<float>(pooling2dDescriptor.
m_StrideY)));
3008 outputWidth =
static_cast<uint32_t
>(ceil(
3009 static_cast<float>(inputWidth - pooling2dDescriptor.
m_PoolWidth + 1) /
3010 static_cast<float>(pooling2dDescriptor.
m_StrideX)));
3015 case DataLayout::NHWC:
3022 case DataLayout::NCHW:
3038 if (layer ==
nullptr)
3043 "Failed to add pooling2d layer for %1% %2%")
3052 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3055 ParsedTfOperationPtr TfParser::AddAdditionLayer(
const tensorflow::NodeDef& nodeDef,
bool isBiasAdd)
3057 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3059 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3060 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3074 "Unsupported bias for BiasAdd. It should be a 1D vector. " 3075 "Got %1% dimensions for input %2%. Node %3% %4%")
3077 % inputs[1].m_IndexedValue->GetNode().name()
3082 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
3085 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat ==
"NHWC", *m_Network, nodeDef);
3091 const bool isNHWC =
true;
3092 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3097 const bool isNHWC =
true;
3098 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3112 std::vector<unsigned int> outputShape;
3118 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3134 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3139 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3142 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3143 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3149 if (input0NumDims < input1NumDims)
3151 const bool isNHWC =
true;
3152 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3154 if (input1NumDims < input0NumDims)
3156 const bool isNHWC =
true;
3157 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3163 if (input0NumDims < input1NumDims)
3172 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3177 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3179 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3180 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3185 if (input0NumDims < input1NumDims)
3187 const bool isNHWC =
true;
3188 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3190 if (input1NumDims < input0NumDims)
3192 const bool isNHWC =
true;
3193 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3202 std::vector<unsigned int> outputShape;
3209 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3215 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3218 IConnectableLayer* TfParser::AddMultiplicationLayer(
const tensorflow::NodeDef& nodeDef)
3220 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3223 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3224 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3229 if (input0NumDims < input1NumDims)
3231 const bool isNHWC =
true;
3232 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3234 if (input1NumDims < input0NumDims)
3236 const bool isNHWC =
true;
3237 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3243 if (input0NumDims < input1NumDims)
3254 IConnectableLayer* TfParser::AddFullyConnectedLayer(
const tensorflow::NodeDef& matMulNodeDef,
3255 const tensorflow::NodeDef* addNodeDef,
const char* armnnLayerName)
3258 ParsedConstTfOperation<float>* biasNode =
nullptr;
3259 if (addNodeDef !=
nullptr)
3261 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
3263 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3265 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3267 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3269 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3276 "ArmNN only supports fully connected layers with constant bias. " 3277 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
3278 % addInputs[0].m_IndexedValue->GetNode().name()
3279 % addInputs[1].m_IndexedValue->GetNode().name()
3280 % addNodeDef->name()
3281 % matMulNodeDef.name()
3287 ParsedConstTfOperation<float>* weightNode =
nullptr;
3288 ParsedTfOperation* inputNode =
nullptr;
3289 unsigned int inputIdx = 0;
3290 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3291 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3293 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3294 inputNode = mulInputs[1].m_IndexedValue;
3295 inputIdx = mulInputs[1].m_Index;
3297 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3299 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3300 inputNode = mulInputs[0].m_IndexedValue;
3301 inputIdx = mulInputs[0].m_Index;
3308 "ArmNN only supports fully connected layers with constant weights. " 3309 "Inputs %1% and %2%. MatMulNode %3% %4%")
3310 % mulInputs[0].m_IndexedValue->GetNode().name()
3311 % mulInputs[1].m_IndexedValue->GetNode().name()
3312 % matMulNodeDef.name()
3316 std::vector<float> weightTensorData;
3318 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
3325 std::vector<float> biasTensorData;
3327 if (addNodeDef !=
nullptr)
3329 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
3336 "Shape of matmul weights and bias do not match. " 3337 "AddNode %1%. MatMulNode %2% %3%")
3338 % addNodeDef->name()
3339 % matMulNodeDef.name()
3347 BOOST_ASSERT(layer !=
nullptr);
3349 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3350 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3354 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3358 void TfParser::LoadNodeDef(
const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
3362 if (nodeDef.attr().count(
"T") != 0)
3364 auto attr = nodeDef.attr().at(
"T");
3367 else if (nodeDef.attr().count(
"dtype") != 0)
3369 auto attr = nodeDef.attr().at(
"dtype");
3373 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() !=
"Const")
3378 "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). " 3379 "Got %1% for Node %2% %3%")
3380 % tensorflow::DataType_Name(type)
3385 const std::string& operation = nodeDef.op();
3386 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3387 if (itControlInput != m_ControlInputs.end())
3392 auto it = ms_OperationNameToParsingFunctions.find(operation);
3393 if (it != ms_OperationNameToParsingFunctions.end())
3395 auto func = it->second;
3397 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3400 auto it = m_ParsedTfOperations.find(nodeDef.name());
3401 if (it != m_ParsedTfOperations.end())
3403 throw ParseException(boost::str(boost::format(
"Name %1% used by more than one node") % nodeDef.name()));
3405 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3408 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3409 m_RequestedOutputs.end())
3411 auto outId = ParseOutputId(nodeDef.name());
3413 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3415 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3421 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3429 "Unsupported operation %1% in tensorflow::GraphDef %2%")
3435 void TfParser::LoadGraphDef(
const tensorflow::GraphDef& graphDef)
3438 m_NodesByName.clear();
3439 m_NetworkInputsBindingInfo.clear();
3440 m_NetworkOutputsBindingInfo.clear();
3442 for (
int i = 0; i < graphDef.node_size(); ++i)
3444 const tensorflow::NodeDef& node = graphDef.node(i);
3445 m_NodesByName[node.name()] = &node;
3449 for (
const auto& pair : m_InputShapes)
3451 const std::string& requestedInputName = pair.first;
3452 auto nodeIt = m_NodesByName.find(requestedInputName);
3453 if (nodeIt == m_NodesByName.end())
3458 "Couldn't find requested input node '%1%' in graph %2%")
3459 % requestedInputName
3465 std::vector<const tensorflow::NodeDef*> targetNodes;
3466 for (
const std::string& requestedOutputName : m_RequestedOutputs)
3468 auto nodeIt = m_NodesByName.find(requestedOutputName);
3469 if (nodeIt == m_NodesByName.end())
3474 "Couldn't find requested output node '%1%' in graph %2%")
3475 % requestedOutputName
3478 targetNodes.push_back(nodeIt->second);
3482 std::vector<const tensorflow::NodeDef*> sortedNodes;
3483 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3485 [
this](
const tensorflow::NodeDef* node)
3487 auto outputs = GetTfInputNodes(*node);
3488 std::vector<const tensorflow::NodeDef*> nodesOnly;
3489 for (
const auto & o : outputs) {
3490 nodesOnly.push_back(o.m_IndexedValue);
3499 "Cycle detected in graph %1%")
3504 for (
const auto& it : sortedNodes)
3506 const tensorflow::NodeDef& currentNode = *it;
3507 LoadNodeDef(currentNode, graphDef);
3512 const std::map<std::string, TensorShape>& inputShapes,
3513 const std::vector<std::string>& requestedOutputs)
3515 FILE* fd = fopen(graphFile,
"r");
3522 "Graph file %1% failed to open %2%")
3528 tensorflow::GraphDef graphDef;
3529 auto input =
new google::protobuf::io::FileInputStream(fileno(fd));
3530 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3539 "Failed to parse graph file %1%")
3543 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3547 const std::map<std::string, TensorShape>& inputShapes,
3548 const std::vector<std::string>& requestedOutputs)
3551 tensorflow::GraphDef graphDef;
3552 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3559 "Failed to parse graph file %1%")
3563 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3567 const std::map<std::string, TensorShape>& inputShapes,
3568 const std::vector<std::string>& requestedOutputs)
3570 FILE* fd = fopen(graphFile,
"rb");
3577 "Graph file %1% failed to open %2%")
3583 tensorflow::GraphDef graphDef;
3585 google::protobuf::io::FileInputStream inStream(fileno(fd));
3586 google::protobuf::io::CodedInputStream codedStream(&inStream);
3587 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
3588 bool success = graphDef.ParseFromCodedStream(&codedStream);
3596 "Failed to parse protobuf file %1% %2%")
3601 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3604 INetworkPtr TfParser::CreateNetworkFromGraphDef(
const tensorflow::GraphDef& graphDef,
3605 const std::map<std::string, TensorShape>& inputShapes,
3606 const std::vector<std::string>& requestedOutputs)
3608 m_Network = INetwork::Create();
3610 m_InputShapes = inputShapes;
3611 if (requestedOutputs.size() == 0)
3616 "requestedOutputs must have at least one entry %1%")
3619 m_RequestedOutputs = requestedOutputs;
3623 LoadGraphDef(graphDef);
3633 return std::move(m_Network);
3636 void TfParser::Cleanup()
3639 m_InputShapes.clear();
3640 m_RequestedOutputs.clear();
3641 m_NodesByName.clear();
3642 m_ParsedTfOperations.clear();
3647 return GetBindingInfo(name,
"input", m_NetworkInputsBindingInfo);
3652 return GetBindingInfo(name,
"output", m_NetworkOutputsBindingInfo);
3655 std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(
const std::string& layerName,
3656 const char* bindingPointDesc,
3657 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3659 auto it = nameToBindingInfo.find(layerName);
3660 if (it == nameToBindingInfo.end())
3665 "Unknown %1% '%2%' %3%")
3675 return TrackBindingPoint(layer,
id, tensorInfo,
"input", m_NetworkInputsBindingInfo);
3680 return TrackBindingPoint(layer,
id, tensorInfo,
"output", m_NetworkOutputsBindingInfo);
3686 const char* bindingPointDesc,
3687 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3689 const std::string layerName = layer->
GetName();
3690 auto it = nameToBindingInfo.find(layerName);
3691 if (it == nameToBindingInfo.end())
3693 nameToBindingInfo[layerName] = std::make_pair(
id, tensorInfo);
3700 "Id %1% used by more than one %2% layer %3%")
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
virtual armnn::INetworkPtr CreateNetworkFromString(const char *protoText, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network directly from protobuf text in a string. Useful for debugging/testing.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
WithOutputTensorIndex< std::string > OutputId
virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network from a protobuf text file on the disk.
WithOutputTensorIndex< ParsedTfOperation * > OutputOfParsedTfOperation
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< ParsedTfOperation > ParsedTfOperationPtr
virtual IConnectableLayer * AddComparisonLayer(const ComparisonDescriptor &comparisonDescriptor, const char *name=nullptr)=0
unsigned int GetNumOutputSlots() const override
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
uint32_t m_Axis
0-based axis along which to stack the input tensors.
const char * GetName() const override
unsigned int GetNumDimensions() const
virtual IConnectableLayer * AddMaximumLayer(const char *name=nullptr)=0
uint32_t m_PadRight
Padding right value in the width dimension.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
friend class ParsedConstTfOperation
A NormalizationDescriptor for the NormalizationLayer.
virtual IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)=0
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
virtual const TensorInfo & GetTensorInfo() const =0
float m_Alpha
Alpha value for the normalization equation.
virtual IConnectableLayer * AddNormalizationLayer(const NormalizationDescriptor &normalizationDescriptor, const char *name=nullptr)=0
virtual IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)=0
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network from a protobuf binary file on the disk.
DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
virtual IConnectableLayer * AddStackLayer(const StackDescriptor &descriptor, const char *name=nullptr)=0
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
unsigned int GetNumDimensions() const
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
uint32_t m_PoolHeight
Pooling height value.
void CalculateSamePadding(uint32_t inputSize, uint32_t stride, uint32_t filterSize, bool samePadding, uint32_t *paddingFront, uint32_t *paddingBack)
virtual IConnectableLayer * AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor &elementwiseUnaryDescriptor, const char *name=nullptr)=0
uint32_t m_TargetHeight
Target height value.
unsigned int GetHeightIndex() const
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING)
int32_t m_NewAxisMask
New axis mask value. If set, the begin, end and stride are disregarded and a new 1 dimension is insert...
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CalculateStridedSliceOutputTensorInfo(const armnn::TensorInfo &inputTensorInfo, const armnn::StridedSliceDescriptor &desc, armnn::TensorInfo &outputTensorInfo)
Create output tensor info for a StridedSlice operator.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadTop
Padding top value in the height dimension.
A PadDescriptor for the PadLayer.
TensorShape m_InputShape
Required shape of all input tensors.
An ActivationDescriptor for the ActivationLayer.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views. The arguments are: view, dimension, value. If the view is greater than or ...
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
const TensorShape & GetShape() const
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string &name) const override
Retrieves binding info (layer id and tensor info) for the network input identified by the given layer...
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
uint32_t m_PadTop
Padding top value in the height dimension.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
virtual IConnectableLayer * AddDivisionLayer(const char *name=nullptr)=0
bool m_BiasEnabled
Enable/disable bias.
virtual IConnectableLayer * AddGatherLayer(const char *name=nullptr)=0
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
constexpr unsigned int GetDataTypeSize(DataType dataType)
virtual IConnectableLayer * AddSubtractionLayer(const char *name=nullptr)=0
WithOutputTensorIndex< const tensorflow::NodeDef * > OutputOfConstNodeDef
virtual const char * GetName() const =0
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A ReshapeDescriptor for the ReshapeLayer.
uint32_t m_PadTop
Padding top value in the height dimension.
void CalculateReducedOutputTensoInfo(const armnn::TensorInfo &inputTensorInfo, const std::set< unsigned int > &axisSet, bool keepDims, armnn::TensorInfo &outputTensorInfo)
Creates a tensor info after reducing the dimensions mentioned in axisData.
TensorShape m_TargetShape
Target shape value.
unsigned int GetWidthIndex() const
uint32_t m_PadLeft
Padding left value in the width dimension.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
uint32_t m_PoolWidth
Pooling width value.
virtual IConnectableLayer * AddInputLayer(LayerBindingId id, const char *name=nullptr)=0
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
const armnn::PermutationVector ArmNNToNHWC
unsigned int GetNumBytes() const
uint32_t m_NormSize
Depth radius value.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual IConnectableLayer * AddAdditionLayer(const char *name=nullptr)=0
std::vector< int > m_End
End values for the input that will be sliced.
armnn::BindingPointInfo BindingPointInfo
virtual unsigned int GetNumOutputSlots() const =0
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
uint32_t m_PadLeft
Padding left value in the width dimension.
float m_Beta
Beta value for the normalization equation.
virtual IConnectableLayer * AddStridedSliceLayer(const StridedSliceDescriptor &stridedSliceDescriptor, const char *name=nullptr)=0
virtual IConnectableLayer * AddReshapeLayer(const ReshapeDescriptor &reshapeDescriptor, const char *name=nullptr)=0
virtual IConnectableLayer * AddConcatLayer(const ConcatDescriptor &concatDescriptor, const char *name=nullptr)=0
A FullyConnectedDescriptor for the FullyConnectedLayer.
unsigned int CheckPaddingTensor(const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string &name) const override
Retrieves binding info (layer id and tensor info) for the network output identified by the given laye...
virtual IConnectableLayer * AddPooling2dLayer(const Pooling2dDescriptor &pooling2dDescriptor, const char *name=nullptr)=0
virtual IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)=0
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
void SetDataType(DataType type)
void SetShape(const TensorShape &newShape)
virtual IConnectableLayer * AddPadLayer(const PadDescriptor &padDescriptor, const char *name=nullptr)=0
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
bool m_BiasEnabled
Enable/disable bias.
virtual IConnectableLayer * AddSplitterLayer(const ViewsDescriptor &splitterDescriptor, const char *name=nullptr)=0
int32_t m_EllipsisMask
Ellipsis mask value.
virtual IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr)=0
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
A StackDescriptor for the StackLayer.
int32_t m_BeginMask
Begin mask value. If set, then the begin is disregarded and the fullest range is used for the dimensi...
TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
A SoftmaxDescriptor for the SoftmaxLayer.
An output connection slot for a layer. The output slot may be connected to 1 or more input slots of s...
uint32_t m_TargetWidth
Target width value.
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
std::unique_ptr< ITfParser, void(*)(ITfParser *parser)> ITfParserPtr
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
bool m_BiasEnabled
Enable/disable bias.
float m_K
Kappa value used for the across channel normalization equation.
unsigned int GetNumElements() const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
friend class ParsedMatMulTfOperation
DataType GetDataType() const
A Pooling2dDescriptor for the Pooling2dLayer.
Parses a directed acyclic graph from a tensorflow protobuf file.
virtual IConnectableLayer * AddResizeLayer(const ResizeDescriptor &resizeDescriptor, const char *name=nullptr)=0
virtual IConnectableLayer * AddActivationLayer(const ActivationDescriptor &activationDescriptor, const char *name=nullptr)=0
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual IConnectableLayer * AddOutputLayer(LayerBindingId id, const char *name=nullptr)=0
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A Convolution2dDescriptor for the Convolution2dLayer.
A MeanDescriptor for the MeanLayer.
virtual int Connect(IInputSlot &destination)=0
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const armnn::PermutationVector NHWCToArmNN
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const TensorShape & GetShape() const
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual IConnectableLayer * AddSoftmaxLayer(const SoftmaxDescriptor &softmaxDescriptor, const char *name=nullptr)=0
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
virtual IConnectableLayer * AddMeanLayer(const MeanDescriptor &meanDescriptor, const char *name=nullptr)=0
int32_t m_EndMask
End mask value. If set, then the end is disregarded and the fullest range is used for the dimension...
friend class ParsedMulTfOperation
A ResizeDescriptor for the ResizeLayer.
virtual IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)=0
A ComparisonDescriptor for the ComparisonLayer.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
virtual IConnectableLayer * AddMinimumLayer(const char *name=nullptr)=0
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process...
A StridedSliceDescriptor for the StridedSliceLayer.
uint32_t m_PadRight
Padding right value in the width dimension.