23 #include <google/protobuf/io/zero_copy_stream_impl.h> 24 #include <google/protobuf/text_format.h> 26 #include <tensorflow/core/framework/graph.pb.h> 29 #include <fmt/format.h> 34 using namespace armnn;
39 ITfParser::ITfParser() : pTfParserImpl(new ITfParser::TfParserImpl()){}
41 ITfParser::~ITfParser() =
default;
59 const std::map<std::string, armnn::TensorShape>& inputShapes,
60 const std::vector<std::string>& requestedOutputs)
66 const std::map<std::string, armnn::TensorShape>& inputShapes,
67 const std::vector<std::string>& requestedOutputs)
69 return pTfParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs);
73 const std::map<std::string, armnn::TensorShape>& inputShapes,
74 const std::vector<std::string>& requestedOutputs)
76 return pTfParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
81 return pTfParserImpl->GetNetworkInputBindingInfo(name);
86 return pTfParserImpl->GetNetworkOutputBindingInfo(name);
95 template <
typename Callable>
96 void ReadMandatoryNodeAttributeImpl(
const tensorflow::NodeDef& nodeDef,
97 const std::string& attribName,
98 tensorflow::AttrValue::ValueCase expectedValueCase,
101 auto iter = nodeDef.attr().find(attribName);
102 if (iter != nodeDef.attr().end())
104 const auto& attrValue = iter->second;
105 if (attrValue.value_case() == expectedValueCase)
112 fmt::format(
"Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, " 113 "but found {} instead {}",
116 static_cast<int>(expectedValueCase),
117 static_cast<int>(attrValue.value_case()),
124 fmt::format(
"Could not find required attribute {} in node {} {}",
131 template <
typename Callable>
132 void ReadOptionalNodeAttributeImpl(
const tensorflow::NodeDef& nodeDef,
133 const std::string& attribName,
134 tensorflow::AttrValue::ValueCase expectedValueCase,
137 auto iter = nodeDef.attr().find(attribName);
138 if (iter != nodeDef.attr().end())
140 const auto& attrValue = iter->second;
141 if (attrValue.value_case() == expectedValueCase)
148 fmt::format(
"Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, " 149 "but found {} instead {}",
152 static_cast<int>(expectedValueCase),
153 static_cast<int>(attrValue.value_case()),
159 float ReadMandatoryNodeFloatAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
161 float attribValue = 0.0f;
162 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
163 [&attribValue](
const tensorflow::AttrValue& attrValue)
165 attribValue = attrValue.f();
170 int32_t ReadMandatoryNodeInt32Attribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
172 int32_t attribValue = 0u;
173 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
174 [&attribValue](
const tensorflow::AttrValue& attrValue)
176 attribValue =
static_cast<int32_t
>(attrValue.i());
181 bool ReadMandatoryNodeBoolAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
183 bool attribValue =
false;
184 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
185 [&attribValue](
const tensorflow::AttrValue& attrValue)
187 attribValue =
static_cast<bool>(attrValue.b());
192 uint32_t ReadMandatoryNodeUint32Attribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
194 uint32_t attribValue = 0u;
195 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
196 [&attribValue](
const tensorflow::AttrValue& attrValue)
198 attribValue =
static_cast<uint32_t
>(attrValue.i());
203 std::string ReadMandatoryNodeStringAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
205 std::string attribValue =
"";
206 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
207 [&attribValue](
const tensorflow::AttrValue& attrValue)
209 attribValue = attrValue.s();
214 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(
const tensorflow::NodeDef& nodeDef,
215 const std::string& name)
217 std::vector<uint32_t> attriList;
218 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
219 [&attriList](
const tensorflow::AttrValue& attrValue)
221 for (
int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
223 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
230 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(
const tensorflow::NodeDef& nodeDef,
231 const std::string& name)
233 std::vector<uint32_t> attriList;
234 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
235 [&attriList](
const tensorflow::AttrValue& attrValue)
237 for (
int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
239 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
246 std::string ReadOptionalNodeStringAttribute(
const tensorflow::NodeDef& nodeDef,
247 const std::string& name,
248 const std::string& defaultValue =
"")
250 std::string attribValue = defaultValue;
251 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
252 [&attribValue](
const tensorflow::AttrValue& attrValue)
254 attribValue = attrValue.s();
259 bool ReadOptionalNodeBoolAttribute(
const tensorflow::NodeDef& nodeDef,
260 const std::string& name,
261 bool defaultValue =
false)
263 bool attribValue = defaultValue;
264 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
265 [&attribValue](
const tensorflow::AttrValue& attrValue)
267 attribValue = attrValue.b();
272 tensorflow::DataType ReadMandatoryNodeTypeAttribute(
const tensorflow::NodeDef& nodeDef,
const std::string& name)
275 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
276 [&attribValue](
const tensorflow::AttrValue& attrValue)
278 attribValue = attrValue.type();
285 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
286 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
288 if (stretchDim != targetDims.end())
290 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
293 fmt::format(
"At most one component of shape can be -1 {}",
297 auto targetNumElements =
299 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
300 auto stretchIndex =
static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
301 outDims[stretchIndex] = input.
GetNumElements() / targetNumElements;
305 reshapeInfo.
SetShape(
TensorShape{
static_cast<unsigned int>(outDims.size()), outDims.data() });
312 INetwork& m_Network,
const tensorflow::NodeDef& nodeDef)
316 const unsigned int matchDim = inputTensorInfo.
GetNumDimensions() - (isNHWC ? 1 : 3);
317 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
318 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.
GetNumDimensions(), 1);
319 reshapedDimensions[matchDim] = input1Info.
GetShape()[0];
324 const std::string reshapeLayerName =
"reshape_for-" + nodeDef.name();
337 OutputId ParseOutputId(
const std::string & name)
339 unsigned int outputNum = 0;
340 size_t colonPos = name.find_last_of(
":");
341 if (colonPos != std::string::npos)
343 int n = std::stoi(name.substr(colonPos+1));
347 fmt::format(
"Output tensor id is out of range for {} {}",
351 outputNum =
static_cast<unsigned int>(n);
353 return OutputId(name.substr(0,colonPos),outputNum);
356 #define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \ 357 if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \ 359 throw ParseException( \ 360 fmt::format("Unsupported data format {} passed for {} node {}. " \ 361 "Only NHWC and NCHW supported {}", \ 365 CHECK_LOCATION().AsString())); \ 368 #define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \ 369 if(PADDING != "SAME" && PADDING != "VALID" ) \ 371 throw ParseException( \ 372 fmt::format("Only 'SAME' and 'VALID' padding supported. Got {} for {} {}", \ 375 CHECK_LOCATION().AsString())); \ 380 const std::map<std::string, ITfParser::TfParserImpl::OperationParsingFunction>
381 ITfParser::TfParserImpl::ms_OperationNameToParsingFunctions = {
382 {
"Const", &TfParserImpl::ParseConst },
383 {
"Add", &TfParserImpl::ParseAdd },
384 {
"AddN", &TfParserImpl::ParseAddN },
385 {
"BiasAdd", &TfParserImpl::ParseBiasAdd },
386 {
"Identity", &TfParserImpl::ParseIdentity },
387 {
"Conv2D", &TfParserImpl::ParseConv2D },
388 {
"DepthwiseConv2dNative", &TfParserImpl::ParseDepthwiseConv2D },
389 {
"ExpandDims", &TfParserImpl::ParseExpandDims },
390 {
"FusedBatchNorm", &TfParserImpl::ParseFusedBatchNorm },
391 {
"Gather", &TfParserImpl::ParseGather},
392 {
"Greater", &TfParserImpl::ParseGreater},
393 {
"ConcatV2", &TfParserImpl::ParseConcat },
394 {
"LRN", &TfParserImpl::ParseLrn },
395 {
"MatMul", &TfParserImpl::ParseMatMul },
396 {
"Mean", &TfParserImpl::ParseMean },
397 {
"Mul", &TfParserImpl::ParseMul },
398 {
"Placeholder", &TfParserImpl::ParsePlaceholder },
399 {
"RealDiv", &TfParserImpl::ParseRealDiv },
400 {
"Relu", &TfParserImpl::ParseRelu },
401 {
"Relu6", &TfParserImpl::ParseRelu6 },
402 {
"Reshape", &TfParserImpl::ParseReshape },
403 {
"ResizeBilinear", &TfParserImpl::ParseResizeBilinear },
404 {
"Rsqrt", &TfParserImpl::ParseRsqrt },
405 {
"Shape", &TfParserImpl::ParseShape },
406 {
"Squeeze", &TfParserImpl::ParseSqueeze },
407 {
"Sigmoid", &TfParserImpl::ParseSigmoid },
408 {
"Softmax", &TfParserImpl::ParseSoftmax },
409 {
"Softplus", &TfParserImpl::ParseSoftplus },
410 {
"Split", &TfParserImpl::ParseSplit },
411 {
"StridedSlice", &TfParserImpl::ParseStridedSlice },
412 {
"Tanh", &TfParserImpl::ParseTanh },
413 {
"MaxPool", &TfParserImpl::ParseMaxPool },
414 {
"AvgPool", &TfParserImpl::ParseAvgPool },
415 {
"Maximum", &TfParserImpl::ParseMaximum },
416 {
"Minimum", &TfParserImpl::ParseMinimum },
417 {
"Equal", &TfParserImpl::ParseEqual },
418 {
"Pad", &TfParserImpl::ParsePad },
419 {
"Sub", &TfParserImpl::ParseSub },
420 {
"Pack" , &TfParserImpl::ParseStack },
421 {
"Stack", &TfParserImpl::ParseStack },
422 {
"Transpose", &TfParserImpl::ParseTranspose },
425 const std::list<std::string> ITfParser::TfParserImpl::m_ControlInputs = {
// Computes front/back padding for TensorFlow-style "SAME" padding.
// With samePadding == false both paddings are 0 ("VALID").
// NOTE(review): the leading parameter lines were lost in the extracted text;
// (inputSize, filterSize, stride, dilation) reconstructed from the visible body.
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t dilation,
                 uint32_t& paddingFront, uint32_t& paddingBack, bool samePadding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (samePadding)
    {
        // SAME: output is ceil(input / stride); pad so the (dilated) filter fits.
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            // Any odd remainder goes to the back, matching TF behavior.
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
455 class ParsedTfOperation
464 virtual ~ParsedTfOperation() {};
466 const tensorflow::NodeDef& GetNode()
const {
return m_Node; }
470 virtual IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex) = 0;
473 virtual ParsedTfOperation* ResolveIdentityOperations()
480 const tensorflow::NodeDef& m_Node;
485 class SingleLayerParsedTfOperation :
public ParsedTfOperation
489 const tensorflow::NodeDef& node,
491 : ParsedTfOperation(parser, node)
496 IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 500 unsigned int armnnOutputSlotIdx = tfOutputIndex;
504 fmt::format(
"The requested output slot #{} " 505 "for {} does not exist {}",
518 class DeferredSingleLayerParsedTfOperation :
public SingleLayerParsedTfOperation
522 : SingleLayerParsedTfOperation(parser, node,
nullptr)
526 IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 530 CreateLayerDeferred();
532 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
536 virtual void CreateLayerDeferred() = 0;
540 ITfParser::TfParserImpl::TfParserImpl()
541 : m_Network(nullptr, nullptr)
548 if (nodeDef->op() !=
"Identity")
553 if (nodeDef->input_size() != 1)
556 fmt::format(
"Identity node should have a single input! {} has {} inputs {}",
558 nodeDef->input_size(),
565 const tensorflow::NodeDef* inputNode = it->second;
571 fmt::format(
"Cannot find what the Identity node {} is linked to! {}",
577 std::vector<OutputOfConstNodeDef>
580 std::vector<OutputOfConstNodeDef> ret;
582 if (nodeDef.op() ==
"Const")
588 ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
589 for (
int j = 0; j < nodeDef.input_size(); ++j)
591 OutputId outputId = ParseOutputId(nodeDef.input(j));
593 if (nodeDef.input(j)[0] ==
'^')
603 fmt::format(
"Can't find node '{}', which is listed as an input of '{}' {}",
614 std::vector<OutputOfParsedTfOperation>
616 std::size_t expectedNumInputs)
620 const std::size_t numInputs = nodes.size();
621 if (numInputs != expectedNumInputs)
624 fmt::format(
"Unexpected number of inputs for node {}. Expected {}, found {} {}",
631 std::vector<OutputOfParsedTfOperation> result;
632 for (
auto&& node : nodes)
638 fmt::format(
"Node with name '{}' has not been parsed {}",
639 node.m_IndexedValue->name(),
644 parsedOp = parsedOp->ResolveIdentityOperations();
651 const tensorflow::NodeDef& nodeDef,
654 const std::string& layerName)
661 if (input0Dim != input1Dim)
665 if (input0Dim == 1 && input1Dim == 4)
669 else if (input0Dim == 4 && input1Dim == 1)
676 fmt::format(
"Unsupported broadcast configuration for {} operation {} {}",
689 std::vector<unsigned int> outputShape;
696 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
706 const tensorflow::NodeDef& nodeDef,
709 unsigned int numberOfAddition,
710 unsigned long numberOfLayersToConnect,
715 std::string layerName(nodeDef.name());
716 if (isOdd || numberOfLayersToConnect != 2)
719 layerName.append(
"_addN_").append(std::to_string(numberOfAddition));
725 const tensorflow::NodeDef& nodeDef,
728 unsigned int numberOfAddition)
732 std::string layerName(nodeDef.name());
733 layerName.append(
"_addN_").append(std::to_string(numberOfAddition));
738 const tensorflow::NodeDef& nodeDef,
748 const tensorflow::GraphDef& graphDef)
751 uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef,
"N");
752 if (numberOfInputs < 2)
756 fmt::format(
"AddN Node with name '{}' has less than two ({}) inputs {}",
758 std::to_string(numberOfInputs),
761 else if (numberOfInputs == 2)
772 unsigned int numberOfAdditions = 0;
773 std::vector<IConnectableLayer*> layers;
775 for (
unsigned int i = 0; i < numberOfInputs; ++i)
778 bool onSecondItem = i % 2;
783 nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
784 layers.push_back(newLayer);
788 std::vector<IConnectableLayer*> layersToConnect(layers);
789 unsigned long numberOfLayersToConnect = layersToConnect.size();
790 bool isOdd = numberOfInputs % 2;
792 while (numberOfLayersToConnect > 1)
795 for (
unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
796 bool onSecondItem = i % 2;
801 layersToConnect[i - 1],
804 numberOfLayersToConnect,
806 layers.push_back(newLayer);
810 layersToConnect = layers;
811 numberOfLayersToConnect = layersToConnect.size();
822 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, finalLayer);
827 const tensorflow::GraphDef& graphDef)
834 if (inputs[0].m_IndexedValue->GetNode().op() ==
"MatMul" &&
835 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
839 &nodeDef,nodeDef.name().c_str());
840 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
842 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
843 inputs[1].m_IndexedValue->GetNode().op() ==
"MatMul")
847 &nodeDef,nodeDef.name().c_str());
848 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
858 const tensorflow::GraphDef& graphDef)
869 const tensorflow::NodeDef& node,
872 , m_Representative(representative)
876 virtual IOutputSlot& ResolveArmnnOutputSlot(
unsigned int tfOutputIndex)
override 879 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
884 return m_Representative->ResolveIdentityOperations();
892 const tensorflow::GraphDef& graphDef)
897 return std::make_unique<ParsedIdentityTfOperation>(
this, nodeDef, inputs[0].m_IndexedValue);
903 template <
typename T>
908 const T* tensorData,
const TensorInfo& tensorInfo)
911 m_TensorInfo(tensorInfo)
916 void CreateLayerDeferred()
override 920 m_Node.name().c_str());
924 ConstTensor GetConstTensor(std::vector<T>& outputTensorData)
const 928 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.
GetNumBytes());
931 ConstTensor constTensor(m_TensorInfo, outputTensorData);
935 const T* GetStorage()
const 937 return m_Storage.data();
947 std::vector<T> m_Storage;
953 const tensorflow::NodeDef& nodeDef)
957 case tensorflow::DT_FLOAT:
958 return DataType::Float32;
960 case tensorflow::DT_INT32:
961 return DataType::Signed32;
965 fmt::format(
"Unknown DataType {} for node {} {}",
966 tensorflow::DataType_Name(tfDataType),
972 struct ParseTfTensorValueList
974 template<
typename DataType>
976 const tensorflow::TensorProto& tfTensor,
977 unsigned int dstElements,
978 std::vector<int8_t>& outputData);
980 template <
typename DataType>
981 static void ReadData(
const void* srcData,
unsigned int numSrcElements,
982 std::vector<int8_t>& dstData,
unsigned int numDstElements)
985 if (numSrcElements == 0)
991 if (numDstElements == 0)
993 numDstElements = numSrcElements;
997 dstData.resize(std::max(numSrcElements, numDstElements) *
sizeof(
DataType));
1003 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
1005 if (numDstElements > numSrcElements)
1008 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
1015 void ParseTfTensorValueList::Parse<float>(
const tensorflow::TensorProto& tfTensor,
1016 unsigned int dstElements, std::vector<int8_t>& outputData)
1018 ReadData<float>(tfTensor.float_val().data(),
static_cast<unsigned int>(tfTensor.float_val_size()),
1019 outputData, dstElements);
1023 void ParseTfTensorValueList::Parse<int32_t>(
const tensorflow::TensorProto& tfTensor,
1024 unsigned int dstElements, std::vector<int8_t>& outputData)
1026 ReadData<int32_t>(tfTensor.int_val().data(),
static_cast<unsigned int>(tfTensor.int_val_size()),
1027 outputData, dstElements);
1030 template <
template<
typename>
class OperatorType,
typename T = int8_t>
1033 template<
typename DataType,
class... Args>
1035 const tensorflow::NodeDef& node,
1038 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
1045 template<
typename DataType,
class... Args>
1047 const tensorflow::NodeDef& node,
const std::vector<int8_t>& tensorData,
const TensorInfo& tensorInfo)
1049 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1050 reinterpret_cast<const DataType*
>(tensorData.data()), tensorInfo);
1054 template <
class FuncType>
1055 struct InvokeParseFunction
1057 template<
class ResType,
class... Args>
1058 inline static ResType Result(
DataType dataType, Args&&... args)
1060 if (dataType == DataType::Float32)
1062 return FuncType::template Parse<float>(std::forward<Args>(args)...);
1064 else if (dataType == DataType::Signed32)
1066 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1072 template<
class... Args>
1073 inline static void Result(
DataType dataType, Args&&... args)
1075 if (dataType == DataType::Float32)
1077 FuncType::template Parse<float>(std::forward<Args>(args)...);
1079 else if (dataType == DataType::Signed32)
1081 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1087 const tensorflow::GraphDef& graphDef)
1092 if (nodeDef.attr().count(
"value") == 0)
1095 fmt::format(
"Value not found for Const node - {} {}",
1100 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at(
"value").tensor();
1101 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1104 const auto GetDimensionSize = [](
auto& d) {
return d.size(); };
1106 std::vector<unsigned int> dimensionSizes;
1107 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1108 std::back_inserter(dimensionSizes), GetDimensionSize);
1112 unsigned int numElements = 0U;
1114 if (!dimensionSizes.empty())
1116 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1117 1U, std::multiplies<unsigned int>());
1120 std::vector<int8_t> tensorData;
1123 if (tfTensor.tensor_content().empty())
1125 InvokeParseFunction<ParseTfTensorValueList>::Result<
void>(dataType, tfTensor, numElements, tensorData);
1129 if (numElements == 0)
1131 const unsigned int tfNumElements =
1132 static_cast<unsigned int>(tensorData.size()) /
GetDataTypeSize(dataType);
1133 dimensionSizes.push_back(tfNumElements);
1139 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1142 if (numElements == 0)
1145 fmt::format(
"No tensor shape found for Const node - {} {}",
1152 if (tensorData.empty())
1155 fmt::format(
"No tensor data found for Const node - {} {}",
1160 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1161 dimensionSizes.data(),
1166 if (tensorData.size() > tensorInfo.GetNumBytes())
1169 fmt::format(
"Number of elements ({}) should be less than or equal " 1170 "to the number of elements implied by the shape argument ({}) for Const node - {} {}",
1172 tensorInfo.GetNumElements(),
1177 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1178 dataType,
this, nodeDef, tensorData, tensorInfo);
1181 template<
typename Type>
1189 return dynamic_cast<ParsedConstTfOperation<Type>*
>(it->second.get()) !=
nullptr;
1192 template<
typename Type>
1195 return dynamic_cast<ParsedConstTfOperation<Type>*
>(parsedTfOpPtr) !=
nullptr;
1200 for (
unsigned int i = 0; i < inputs.size(); i++)
1202 if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1208 fmt::format(
"ArmNN only supports operators with constant axis. {}",
1214 const tensorflow::GraphDef& graphDef)
1218 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1221 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1224 fmt::format(
"ArmNN only supports Convolution layers with constant weights for {}, input {} {}",
1226 inputs[1].m_IndexedValue->GetNode().name(),
1229 ParsedConstTfOperation<float>* weightNode =
1230 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1232 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
1233 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
1234 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
1241 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1250 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef,
"dilations");
1251 if (!dilations.empty())
1265 dataLayout == DataLayout::NHWC ?
1266 std::initializer_list<unsigned int>{ 1, 2, 3, 0 } :
1267 std::initializer_list<unsigned int>{ 2, 3, 1, 0 };
1270 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1274 std::vector<float> weightTensorSwizzledData(weightTensorInfo.
GetNumElements());
1276 weightNode->GetStorage(), weightTensorSwizzledData.data(),
sizeof(float));
1279 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1284 bool padding =
false;
1286 unsigned int outputHeight = 0;
1287 unsigned int outputWidth = 0;
1291 if (paddingString ==
"SAME")
1295 else if (paddingString ==
"VALID")
1304 unsigned int dilatedFilterWidth = weightWidth + (desc.
m_DilationX - 1) * (weightWidth - 1);
1305 unsigned int readWidth = (inputWidth + desc.
m_PadLeft + desc.
m_PadRight) - dilatedFilterWidth;
1306 outputWidth = 1 + (readWidth / desc.
m_StrideX);
1308 unsigned int dilatedFilterHeight = weightHeight + (desc.
m_DilationY - 1) * (weightHeight - 1);
1309 unsigned int readHeight = (inputHeight + desc.
m_PadTop + desc.
m_PadBottom) - dilatedFilterHeight;
1310 outputHeight = 1 + (readHeight / desc.
m_StrideY);
1314 case DataLayout::NHWC:
1321 case DataLayout::NCHW:
1334 nodeDef.name().c_str());
1338 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1342 const tensorflow::GraphDef& graphDef)
1346 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1349 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1352 fmt::format(
"ArmNN only supports Depthwise Convolution layer with constant weights. " 1353 "Non const input found {} for node {} {}",
1354 inputs[1].m_IndexedValue->GetNode().name(),
1359 ParsedConstTfOperation<float>* weightNode =
1360 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1362 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
1363 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
1364 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
1371 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1379 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef,
"dilations");
1380 if (!dilations.empty())
1395 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1399 std::vector<float> weightTensorSwizzledData(weightTensorInfo.
GetNumElements());
1401 weightNode->GetStorage(), weightTensorSwizzledData.data(),
sizeof(float));
1404 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1406 uint32_t weightHeight = weightTensor.
GetShape()[2];
1407 uint32_t weightWidth = weightTensor.
GetShape()[3];
1409 bool padding =
false;
1411 unsigned int outputHeight = 0;
1412 unsigned int outputWidth = 0;
1416 if (paddingString ==
"SAME")
1420 else if (paddingString ==
"VALID")
1429 unsigned int dilatedFilterWidth = weightWidth + (desc.
m_DilationX - 1) * (weightWidth - 1);
1430 unsigned int readWidth = (inputWidth + desc.
m_PadLeft + desc.
m_PadRight) - dilatedFilterWidth;
1431 outputWidth = 1 + (readWidth / desc.
m_StrideX);
1433 unsigned int dilatedFilterHeight = weightHeight + (desc.
m_DilationY - 1) * (weightHeight - 1);
1434 unsigned int readHeight = (inputHeight + desc.
m_PadTop + desc.
m_PadBottom) - dilatedFilterHeight;
1435 outputHeight = 1 + (readHeight / desc.
m_StrideY);
1439 case DataLayout::NHWC:
1446 case DataLayout::NCHW:
1459 nodeDef.name().c_str());
1463 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1468 std::int32_t expandDim)
1474 fmt::format(
"Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
1481 std::vector<uint32_t> outputDims;
1484 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1488 auto currentDimension = inputTensorInfo.
GetShape()[i];
1489 outputDims.push_back(currentDimension);
1495 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1496 outputDims.insert(getPosition, 1);
1504 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1505 outputDims.insert(getPosition, 1);
1511 fmt::format(
"Cannot expand dimension {} in input tensor with {} dimension {}",
1517 if (outputDims.size() > 4)
1520 fmt::format(
"Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
1532 return outTensorInfo;
1536 const tensorflow::GraphDef& graphDef)
1544 const std::size_t numInputs = nodes.size();
1545 std::vector<OutputOfParsedTfOperation> inputs;
1546 std::int32_t expandDim;
1550 expandDim = ReadMandatoryNodeInt32Attribute(nodeDef,
"Tdim");
1557 IOutputSlot& prevLayerOutputSlot = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1563 fmt::format(
"The axis parameter of ExpandDims operation given as second input is not of type int32." 1564 " Input {0} Node {1} {2}",
1565 inputs[1].m_IndexedValue->GetNode().name(),
1571 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1574 fmt::format(
"ArmNN only supports ExpandDims layers with constant axis/dim parameter. " 1575 "Input {0} Node {1} {2}",
1576 inputs[1].m_IndexedValue->GetNode().name(),
1588 fmt::format(
"The axis parameter of ExpandDims operation given as second input is not " 1589 "allowed to hold more than one value. " 1590 "Input {0} Node {1} {2}",
1591 inputs[1].m_IndexedValue->GetNode().name(),
1596 ParsedConstTfOperation<int32_t>* expandDimsNode =
1597 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1599 memcpy(&expandDim, expandDimsNode->GetStorage(),
sizeof(expandDim));
1603 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1615 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1619 const tensorflow::GraphDef& graphDef)
1624 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1627 fmt::format(
"ArmNN only supports FusedBatchNormalization layers with constant scale. " 1628 "Input {}. Node {} {}",
1629 inputs[1].m_IndexedValue->GetNode().name(),
1633 ParsedConstTfOperation<float>* scaleNode =
1634 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1636 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1639 fmt::format(
"ArmNN only supports FusedBatchNormalization layers with constant offset. " 1640 "Input {}. Node {} {}",
1641 inputs[2].m_IndexedValue->GetNode().name(),
1645 ParsedConstTfOperation<float>* offsetNode =
1646 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1648 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1651 fmt::format(
"ArmNN only supports FusedBatchNormalization layers with constant mean. " 1652 "Input {}. Node {} {}",
1653 inputs[3].m_IndexedValue->GetNode().name(),
1657 ParsedConstTfOperation<float>* meanNode =
1658 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1660 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1663 fmt::format(
"ArmNN only supports FusedBatchNormalization layers with constant variance. " 1664 "Input {}. Node {} {}",
1665 inputs[4].m_IndexedValue->GetNode().name(),
1669 ParsedConstTfOperation<float>* varianceNode =
1670 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1672 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef,
"data_format",
"NHWC");
1677 desc.
m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef,
"epsilon");
1678 desc.
m_DataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1682 std::vector<float> scaleTensorData;
1683 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
1685 std::vector<float> offsetTensorData;
1686 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
1688 std::vector<float> meanTensorData;
1689 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
1691 std::vector<float> varianceTensorData;
1692 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
1699 nodeDef.name().c_str());
1701 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1706 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1710 size_t alphaLayerIndex,
1715 const tensorflow::NodeDef& otherNodeDef = otherOp.
m_IndexedValue->GetNode();
1724 if (mulNodeDef.op() ==
"Mul")
1726 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1730 ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1731 ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1732 ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1734 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1736 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1738 ParsedConstTfOperation<float>* alpha =
1739 PolymorphicDowncast<ParsedConstTfOperation<float> *>(
1740 inputs[alphaLayerIndex].m_IndexedValue);
1742 std::vector<float> const_data;
1743 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
1745 if (const_data.size() == 1)
1747 desc.
m_Function = ActivationFunction::LeakyReLu;
1748 desc.
m_A = const_data[0];
1760 const tensorflow::GraphDef& graphDef)
1764 if (inputs.size() != 2)
1767 fmt::format(
"Maximum expects two inputs!. Got {} for Node {} {}",
1773 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1774 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1797 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1808 const tensorflow::NodeDef& nodeDef,
const std::string& layerName)
1812 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1813 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1817 if (input0Dim != input1Dim)
1821 if (input0Dim == 1 && input1Dim == 4)
1825 else if (input0Dim == 4 && input1Dim == 1)
1832 fmt::format(
"Unsupported broadcast configuration for {} operation {} {}",
1838 return {input0Slot, input1Slot};
1845 const tensorflow::NodeDef& nodeDef)
1852 std::vector<unsigned int> outputShape;
1859 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1865 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1872 const tensorflow::NodeDef& nodeDef)
1878 std::vector<unsigned int> outputShape;
1885 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1891 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1895 const tensorflow::GraphDef& graphDef)
1899 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1900 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1902 descriptor.
m_Axis = ReadMandatoryNodeInt32Attribute(nodeDef,
"axis");
1907 unsigned int outputDim = paramsDim - 1 + indicesDim;
1909 std::vector<unsigned int> dimSizes;
1911 for (
unsigned int i = 0; i < indicesDim; ++i)
1915 for (
unsigned int i = 1; i < paramsDim; ++i)
1925 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1927 params.
Connect(layer->GetInputSlot(0));
1928 indices.
Connect(layer->GetInputSlot(1));
1930 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
1934 const tensorflow::GraphDef& graphDef)
1948 const tensorflow::GraphDef& graphDef)
1962 const tensorflow::GraphDef& graphDef)
1975 const tensorflow::GraphDef& graphDef)
1980 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1981 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1988 const bool isNHWC =
true;
1994 const bool isNHWC =
true;
2012 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2016 const tensorflow::GraphDef& graphDef)
2021 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2025 fmt::format(
"Pack/Stack expects at least one input. Got {} for Node {} {}",
2033 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2038 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef,
"axis");
2039 const int sNumDimensions = (
static_cast<int>(numDimensions) + 1);
2040 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
2043 fmt::format(
"Axis index is not in range. Got {} for Node {} {}",
2051 axis =
static_cast<int32_t
>(numDimensions) + axis + 1;
2055 stackDescriptor.
m_Axis =
static_cast<uint32_t
>(axis);
2056 stackDescriptor.
m_NumInputs =
static_cast<uint32_t
>(numInputs);
2059 const unsigned int supportedNumDims = 4;
2060 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2062 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2069 fmt::format(
"The number of dimensions: {} for input tensors of the " 2070 "Pack/Stack op. Number of dimensions should be less than {} {}",
2077 std::vector<unsigned int> outputDimensions;
2080 outputDimensions.push_back(stackDescriptor.
m_InputShape[i]);
2082 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
2087 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2089 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2095 outputDimensions.data(),
2098 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2102 const tensorflow::GraphDef& graphDef)
2107 const auto inputCount = inputs.size();
2109 if (inputCount != 2)
2112 fmt::format(
"The number of given input is {}. It should be two for Transpose op." 2119 auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2122 auto* permuteVectorInput =
2123 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
2124 const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo();
2126 std::vector<int32_t> permuteVectorData;
2127 permuteVectorInput->GetConstTensor(permuteVectorData);
2129 std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
2131 const auto permutationVector =
PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
2134 auto* layer =
m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
2137 input0Slot->Connect(layer->GetInputSlot(0));
2139 const auto& input0Info = input0Slot->GetTensorInfo();
2142 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2144 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2149 const std::string& nodeName)
2151 unsigned int rank = paddingTensor.
GetShape()[0];
2153 if (rank != expectedRank)
2156 fmt::format(
"Expected the padding tensor to be of rank {} not {} on Node {} {}.",
2162 unsigned int second = paddingTensor.
GetShape()[1];
2166 fmt::format(
"Expected the padding tensor to be of dimensions " 2167 "[{1}, 2] not [{1}, {2}] on Node {3} {4}.",
2177 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2180 std::vector<unsigned int> outDims;
2181 for (
unsigned int i = 0; i < numDims; ++i)
2183 unsigned int dimSize = inputTensorInfo.
GetShape()[i];
2184 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2185 dimSize += dimPadding.first;
2186 dimSize += dimPadding.second;
2187 outDims.push_back(dimSize);
2189 TensorInfo paddedTensorInfo = inputTensorInfo;
2190 unsigned int outDimsSize =
static_cast<unsigned int>(outDims.size());
2192 return paddedTensorInfo;
2196 const tensorflow::GraphDef& graphDef)
2203 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2205 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2208 fmt::format(
"ArmNN only supports Pad with constant padding. " 2209 "Input {}. Node {} {}",
2210 inputs[1].m_IndexedValue->GetNode().name(),
2215 ParsedConstTfOperation<int32_t>* paddingTensorOp =
2216 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2218 std::vector<int32_t> paddingTensorData;
2219 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
2226 std::vector<std::pair<unsigned int, unsigned int>> padList;
2227 unsigned int rank =
CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2228 for (
unsigned int i = 0; i < rank; ++i)
2230 std::pair<unsigned int, unsigned int> paddingForDim;
2231 for (
unsigned int j = 0; j < 2; j++)
2233 unsigned int index = (i * 2) + j;
2234 int paddingAmount = paddingTensorData[index];
2236 if (paddingAmount < 0)
2239 fmt::format(
"Negative amount {} specified at [{}, {}] of padding tensor on Node {} {}.",
2248 paddingForDim.first =
static_cast<unsigned int>(paddingAmount);
2252 paddingForDim.second =
static_cast<unsigned int>(paddingAmount);
2255 padList.push_back(paddingForDim);
2263 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2267 const tensorflow::GraphDef& graphDef)
2273 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2280 ParsedConstTfOperation<int32_t>* shapeNode =
2281 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2283 std::vector<int32_t> axisTensorData;
2284 shapeNode->GetConstTensor(axisTensorData);
2287 const unsigned int concatDim =
static_cast<unsigned int>(axisTensorData[0]);
2290 if (concatDim == 0 || concatDim == 2)
2293 fmt::format(
"Dimension {} for concatenation is not supported by Armnn. " 2300 const unsigned int supportedNumDims = 4;
2301 unsigned int numConcatViews = numInputs - 1;
2302 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
2305 unsigned int mergeDim = 0;
2306 for (
unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2309 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2316 fmt::format(
"The number of dimensions: {} for input tensors of the " 2317 "concatenation op should be {} {}",
2324 mergeDims = inputTensorInfo.
GetShape();
2325 unsigned int* viewOrigin =
const_cast<unsigned int*
>(concatDescriptor.
GetViewOrigin(viewIndex));
2326 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
2330 mergeDim += mergeDims[concatDim];
2334 mergeDims[concatDim] = mergeDim;
2339 for (
unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2341 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2345 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2349 const tensorflow::GraphDef& graphDef)
2359 if (tfDataType != tensorflow::DT_INT32)
2362 fmt::format(
"Armnn only supports DT_INT32 as out_type. Got {} for Node {} {}",
2363 tensorflow::DataType_Name(tfDataType),
2369 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2373 std::vector<int32_t> shapeTensorData;
2374 shapeTensorData.reserve(prevLayerDimensions);
2376 for (
unsigned int i=0; i<prevLayerDimensions; ++i)
2378 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.
GetShape()[i]));
2381 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2383 return std::make_unique<ParsedConstTfOperation<int32_t>>(
this,
2385 &shapeTensorData[0],
2390 const tensorflow::GraphDef& graphDef)
2396 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2399 fmt::format(
"ArmNN only supports Reshape layers with constant shapes. " 2400 "Input {} Node {} {}",
2401 inputs[1].m_IndexedValue->GetNode().name(),
2405 ParsedConstTfOperation<int32_t>* shapeNode =
2406 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2408 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2409 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2411 std::vector<int32_t> shapeTensorData;
2412 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
2413 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2423 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2427 const tensorflow::GraphDef& graphDef)
2432 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2435 fmt::format(
"ArmNN only supports ResizeBilinear layers with constant sizes. " 2436 "Input {}. Node {} {}",
2437 inputs[1].m_IndexedValue->GetNode().name(),
2441 ParsedConstTfOperation<int32_t>* sizeNode =
2442 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2445 if (ReadOptionalNodeBoolAttribute(nodeDef,
"align_corners",
false))
2448 fmt::format(
"ArmNN only supports ResizeBilinear layers with align_corners set to false. " 2455 std::vector<int32_t> sizeTensorData;
2456 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
2462 desc.
m_TargetWidth =
static_cast<uint32_t
> (sizeTensorData[1]);
2467 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2471 unsigned int outBatch = inputTensorInfo.
GetShape()[0];
2472 unsigned int outChannels = inputTensorInfo.
GetShape()[3];
2475 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
2482 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2491 if (tfDataType == tensorflow::DT_FLOAT)
2493 type = DataType::Float32;
2495 else if (tfDataType == tensorflow::DT_INT32)
2497 type = DataType::Signed32;
2502 fmt::format(
"Unsupported DataType {} for Squeeze operation {} {}",
2503 tensorflow::DataType_Name(tfDataType),
2512 fmt::format(
"Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
2518 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef,
"squeeze_dims");
2519 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2521 if (squeezeDims.empty())
2523 squeezeDims.assign(dimensionSequence,
2527 std::vector<uint32_t> outputDims;
2530 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2531 auto currentDimension = inputTensorInfo.
GetShape()[i];
2532 if (skipSqueeze || currentDimension != 1)
2534 outputDims.push_back(currentDimension);
2538 if (outputDims.size() > 4)
2541 fmt::format(
"Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
2552 outTensorInfo.SetDataType(type);
2554 return outTensorInfo;
2558 const tensorflow::GraphDef& graphDef)
2563 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2575 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2579 const tensorflow::GraphDef& graphDef)
2585 normalizationDescriptor.
m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2586 normalizationDescriptor.
m_NormChannelType = NormalizationAlgorithmChannel::Across;
2587 normalizationDescriptor.
m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef,
"alpha");
2588 normalizationDescriptor.
m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef,
"beta");
2589 normalizationDescriptor.
m_K = ReadMandatoryNodeFloatAttribute(nodeDef,
"bias");
2590 normalizationDescriptor.
m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef,
"depth_radius");
2596 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2598 nodeDef.name().c_str());
2602 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2618 void CreateLayerDeferred()
override 2621 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node,
nullptr, m_Node.name().c_str());
2626 const tensorflow::GraphDef& graphDef)
2631 return std::make_unique<ParsedMatMulTfOperation>(
this, nodeDef);
2635 const tensorflow::GraphDef& graphDef)
2639 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2642 if (inputs.size() != 2)
2645 fmt::format(
"Mean expects two inputs!. Got {} for Node {} {}",
2651 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef,
"keep_dims");
2653 ParsedConstTfOperation<int32_t>* axisNode =
2654 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2656 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2658 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2659 const int* axisData =
static_cast<const int*
>(axisTensor.GetMemoryArea());
2668 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2669 std::set<unsigned int> positiveAxisSet;
2672 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2673 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2674 [rank](
int i) ->
unsigned int {
return static_cast<unsigned int>((i + rank) % rank); });
2680 meanDescriptor.
m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2687 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2703 void CreateLayerDeferred()
override 2706 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2711 const tensorflow::GraphDef& graphDef)
2715 return std::make_unique<ParsedMulTfOperation>(
this, nodeDef);
2719 const tensorflow::GraphDef& graphDef)
2731 fmt::format(
"Missing input shape for Placeholder '{}' {}",
2735 TensorInfo tensorInfo(it->second, DataType::Float32);
2743 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2747 const tensorflow::GraphDef& graphDef)
2754 const tensorflow::GraphDef& graphDef)
2759 activationDesc.
m_Function = ActivationFunction::ReLu;
2764 const tensorflow::GraphDef& graphDef)
2769 activationDesc.
m_Function = ActivationFunction::BoundedReLu;
2770 activationDesc.
m_A = 6.0f;
2771 activationDesc.
m_B = 0.0f;
2777 const tensorflow::GraphDef& graphDef)
2782 activationDesc.
m_Function = ActivationFunction::Sigmoid;
2788 const tensorflow::GraphDef &graphDef)
2797 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2801 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2805 const tensorflow::GraphDef& graphDef)
2814 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2818 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2822 const tensorflow::GraphDef& graphDef)
2827 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2833 ParsedConstTfOperation<int32_t>* shapeNode =
2834 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2836 std::vector<int32_t> axisTensorData;
2837 shapeNode->GetConstTensor(axisTensorData);
2840 const unsigned int splitDim =
static_cast<unsigned int>(axisTensorData[0]);
2843 if (splitDim == 0 || splitDim == 2)
2846 fmt::format(
"Dimension {} for split is not supported by Armnn. " 2854 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef,
"num_split");
2856 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
2859 const unsigned int supportedNumDims = 4;
2862 if (inputDimSize != supportedNumDims)
2865 fmt::format(
"The number of dimensions: {} for input tensors of the " 2866 "split op should be {} {}",
2872 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2875 for (
unsigned int i = 0; i < inputDimSize; ++i)
2877 splitterDimSizes[i] = inputTensorInfo.
GetShape()[i];
2880 if (splitterDimSizes[splitDim] % num_split != 0)
2882 throw ParseException(
"Number of splits must evenly divide the dimension");
2884 splitterDimSizes[splitDim] /= num_split;
2887 for (
unsigned int g = 0; g < num_split; ++g)
2890 for (
unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2892 splitDesc.
SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2902 splitterDimSizes.data());
2909 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2913 const tensorflow::GraphDef& graphDef)
2918 activationDesc.
m_Function = ActivationFunction::SoftReLu;
2924 const tensorflow::GraphDef& graphDef)
2929 unsigned int numInputs =
static_cast<unsigned int>(nodes.size());
2932 ParsedConstTfOperation<int32_t>* beginNode =
2933 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
2934 std::vector<int32_t> beginTensorData;
2935 beginNode->GetConstTensor(beginTensorData);
2937 ParsedConstTfOperation<int32_t>* endNode =
2938 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
2939 std::vector<int32_t> endTensorData;
2940 endNode->GetConstTensor(endTensorData);
2942 ParsedConstTfOperation<int32_t>* stridesNode =
2943 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
2944 std::vector<int32_t> stridesTensorData;
2945 stridesNode->GetConstTensor(stridesTensorData);
2948 desc.
m_Begin = beginTensorData;
2949 desc.
m_End = endTensorData;
2951 desc.
m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"begin_mask");
2952 desc.
m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"end_mask");
2953 desc.
m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"ellipsis_mask");
2954 desc.
m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"new_axis_mask");
2955 desc.
m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef,
"shrink_axis_mask");
2959 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2968 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2972 const tensorflow::GraphDef& graphDef)
2977 activationDesc.
m_Function = ActivationFunction::TanH;
2978 activationDesc.
m_A = 1.0f;
2979 activationDesc.
m_B = 1.0f;
2991 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2994 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
2998 const tensorflow::GraphDef& graphDef)
3004 const tensorflow::GraphDef& graphDef)
3006 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
3015 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3018 if (inputs.size() != 1)
3021 fmt::format(
"2D Pooling expects one input!. Got {} for Node {} {}",
3027 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef,
"padding");
3028 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
3029 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"strides");
3030 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef,
"ksize");
3038 DataLayout dataLayout = dataFormat ==
"NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
3050 bool padding =
false;
3052 unsigned int outputHeight = 0;
3053 unsigned int outputWidth = 0;
3057 if (paddingString ==
"SAME")
3061 outputHeight =
static_cast<uint32_t
>(ceil(static_cast<float>(inputHeight) /
3062 static_cast<float>(pooling2dDescriptor.
m_StrideY)));
3063 outputWidth =
static_cast<uint32_t
>(ceil(static_cast<float>(inputWidth) /
3064 static_cast<float>(pooling2dDescriptor.
m_StrideX)));
3066 else if (paddingString ==
"VALID")
3070 outputHeight =
static_cast<uint32_t
>(ceil(
3071 static_cast<float>(inputHeight - pooling2dDescriptor.
m_PoolHeight + 1) /
3072 static_cast<float>(pooling2dDescriptor.
m_StrideY)));
3073 outputWidth =
static_cast<uint32_t
>(ceil(
3074 static_cast<float>(inputWidth - pooling2dDescriptor.
m_PoolWidth + 1) /
3075 static_cast<float>(pooling2dDescriptor.
m_StrideX)));
3080 case DataLayout::NHWC:
3087 case DataLayout::NCHW:
3103 if (layer ==
nullptr)
3106 fmt::format(
"Failed to add pooling2d layer for {} {}",
3115 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3122 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3123 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3135 fmt::format(
"Unsupported bias for BiasAdd. It should be a 1D vector. " 3136 "Got {} dimensions for input {}. Node {} {}",
3138 inputs[1].m_IndexedValue->GetNode().name(),
3143 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef,
"data_format");
3152 const bool isNHWC =
true;
3158 const bool isNHWC =
true;
3173 std::vector<unsigned int> outputShape;
3179 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3195 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3203 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3204 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3210 if (input0NumDims < input1NumDims)
3212 const bool isNHWC =
true;
3215 if (input1NumDims < input0NumDims)
3217 const bool isNHWC =
true;
3224 if (input0NumDims < input1NumDims)
3233 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3240 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3241 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3246 if (input0NumDims < input1NumDims)
3248 const bool isNHWC =
true;
3251 if (input1NumDims < input0NumDims)
3253 const bool isNHWC =
true;
3263 std::vector<unsigned int> outputShape;
3270 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3276 return std::make_unique<SingleLayerParsedTfOperation>(
this, nodeDef, layer);
3284 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3285 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3290 if (input0NumDims < input1NumDims)
3292 const bool isNHWC =
true;
3295 if (input1NumDims < input0NumDims)
3297 const bool isNHWC =
true;
3304 if (input0NumDims < input1NumDims)
3316 const tensorflow::NodeDef* addNodeDef,
const char* armnnLayerName)
3319 ParsedConstTfOperation<float>* biasNode =
nullptr;
3320 if (addNodeDef !=
nullptr)
3324 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3326 biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3328 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3330 biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3335 fmt::format(
"ArmNN only supports fully connected layers with constant bias. " 3336 "Inputs {} and {}. AddNode {}. MatMulNode {} {}",
3337 addInputs[0].m_IndexedValue->GetNode().name(),
3338 addInputs[1].m_IndexedValue->GetNode().name(),
3340 matMulNodeDef.name(),
3346 ParsedConstTfOperation<float>* weightNode =
nullptr;
3348 unsigned int inputIdx = 0;
3350 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3352 weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3353 inputNode = mulInputs[1].m_IndexedValue;
3354 inputIdx = mulInputs[1].m_Index;
3356 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3358 weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3359 inputNode = mulInputs[0].m_IndexedValue;
3360 inputIdx = mulInputs[0].m_Index;
3365 fmt::format(
"ArmNN only supports fully connected layers with constant weights. " 3366 "Inputs {} and {}. MatMulNode {} {}",
3367 mulInputs[0].m_IndexedValue->GetNode().name(),
3368 mulInputs[1].m_IndexedValue->GetNode().name(),
3369 matMulNodeDef.name(),
3373 std::vector<float> weightTensorData;
3375 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
3382 std::vector<float> biasTensorData;
3384 if (addNodeDef !=
nullptr)
3386 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
3391 fmt::format(
"Shape of matmul weights and bias do not match. " 3392 "AddNode {}. MatMulNode {} {}",
3394 matMulNodeDef.name(),
3400 layer =
m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
3404 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3405 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3409 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3417 if (nodeDef.attr().count(
"T") != 0)
3419 auto attr = nodeDef.attr().at(
"T");
3422 else if (nodeDef.attr().count(
"dtype") != 0)
3424 auto attr = nodeDef.attr().at(
"dtype");
3428 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() !=
"Const")
3431 fmt::format(
"Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). " 3432 "Got {} for Node {} {}",
3433 tensorflow::DataType_Name(type),
3438 const std::string& operation = nodeDef.op();
3448 auto func = it->second;
3456 throw ParseException(fmt::format(
"Name {} used by more than one node", nodeDef.name()));
3464 auto outId = ParseOutputId(nodeDef.name());
3466 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3468 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3480 fmt::format(
"Unsupported operation {} in tensorflow::GraphDef {}",
3493 for (
int i = 0; i < graphDef.node_size(); ++i)
3495 const tensorflow::NodeDef& node = graphDef.node(i);
3502 const std::string& requestedInputName = pair.first;
3507 fmt::format(
"Couldn't find requested input node '{}' in graph {}",
3514 std::vector<const tensorflow::NodeDef*> targetNodes;
3521 fmt::format(
"Couldn't find requested output node '{}' in graph {}",
3522 requestedOutputName,
3525 targetNodes.push_back(nodeIt->second);
3529 std::vector<const tensorflow::NodeDef*> sortedNodes;
3530 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3532 [
this](
const tensorflow::NodeDef* node)
3535 std::vector<const tensorflow::NodeDef*> nodesOnly;
3536 for (
const auto & o : outputs) {
3537 nodesOnly.push_back(o.m_IndexedValue);
3544 fmt::format(
"Cycle detected in graph {}",
3549 for (
const auto& it : sortedNodes)
3551 const tensorflow::NodeDef& currentNode = *it;
3557 const std::map<std::string, TensorShape>& inputShapes,
3558 const std::vector<std::string>& requestedOutputs)
3560 FILE* fd = fopen(graphFile,
"r");
3565 fmt::format(
"Graph file {} failed to open {}",
3571 tensorflow::GraphDef graphDef;
3572 auto input =
new google::protobuf::io::FileInputStream(fileno(fd));
3573 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3580 fmt::format(
"Failed to parse graph file {}",
3588 const std::map<std::string, TensorShape>& inputShapes,
3589 const std::vector<std::string>& requestedOutputs)
3592 tensorflow::GraphDef graphDef;
3593 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3598 fmt::format(
"Failed to parse graph file {}",
3606 const std::map<std::string, TensorShape>& inputShapes,
3607 const std::vector<std::string>& requestedOutputs)
3609 FILE* fd = fopen(graphFile,
"rb");
3614 fmt::format(
"Graph file {} failed to open {}",
3620 tensorflow::GraphDef graphDef;
3622 google::protobuf::io::FileInputStream inStream(fileno(fd));
3623 google::protobuf::io::CodedInputStream codedStream(&inStream);
3624 codedStream.SetTotalBytesLimit(INT_MAX);
3625 bool success = graphDef.ParseFromCodedStream(&codedStream);
3631 fmt::format(
"Failed to parse protobuf file {} {}",
3640 const std::map<std::string, TensorShape>& inputShapes,
3641 const std::vector<std::string>& requestedOutputs)
3646 if (requestedOutputs.size() == 0)
3649 fmt::format(
"requestedOutputs must have at least one entry {}",
3689 const char* bindingPointDesc,
3690 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3692 auto it = nameToBindingInfo.find(layerName);
3693 if (it == nameToBindingInfo.end())
3696 fmt::format(
"Unknown {} '{}' {}",
3721 const char* bindingPointDesc,
3722 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3724 const std::string layerName = layer->
GetName();
3725 auto it = nameToBindingInfo.find(layerName);
3726 if (it == nameToBindingInfo.end())
3728 nameToBindingInfo[layerName] = std::make_pair(
id, tensorInfo);
3733 fmt::format(
"Id {} used by more than one {} layer {}",
friend class ParsedConstTfOperation
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
std::unique_ptr< ITfParser, void(*)(ITfParser *parser)> ITfParserPtr
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Create the network from a protobuf text file on the disk.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
ParsedTfOperationPtr ParseMaximum(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::map< std::string, armnn::TensorShape > m_InputShapes
ParsedTfOperationPtr ParsePlaceholder(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Creates the network from a protobuf text file on the disk.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
ParsedTfOperationPtr ParseMean(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
static void TrackBindingPoint(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
ParsedTfOperationPtr ParseTanh(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
const tensorflow::NodeDef * ResolveIdentityNode(const tensorflow::NodeDef *nodeDef)
Handling identity layers as the input for Conv2D layer.
unsigned int GetWidthIndex() const
float m_K
Kappa value used for the across channel normalization equation.
WithOutputTensorIndex< ParsedTfOperation * > OutputOfParsedTfOperation
const TensorShape & GetShape() const
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
void TrackInputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
armnn::BindingPointInfo BindingPointInfo
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
friend class ParsedIdentityTfOperation
WithOutputTensorIndex< std::string > OutputId
ParsedTfOperationPtr ParseStack(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
ParsedTfOperationPtr ParsePooling2d(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef, armnn::PoolingAlgorithm pooltype)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
TensorShape m_InputShape
Required shape of all input tensors.
BindingPointInfo GetNetworkInputBindingInfo(const std::string &name) const
Retrieves binding info (layer id and tensor info) for the network input identified by the given layer...
uint32_t m_PoolWidth
Pooling width value.
ParsedTfOperationPtr ParseMul(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
armnn::IConnectableLayer * AddMultiplicationLayer(const tensorflow::NodeDef &nodeDef)
uint32_t m_PadLeft
Padding left value in the width dimension.
ParsedTfOperationPtr ParseTranspose(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
void LoadGraphDef(const tensorflow::GraphDef &graphDef)
Sets up variables and then performs BFS to parse all nodes.
void CalculateReducedOutputTensoInfo(const armnn::TensorInfo &inputTensorInfo, const std::set< unsigned int > &axisSet, bool keepDims, armnn::TensorInfo &outputTensorInfo)
Creates a tensor info after reducing the dimensions mentioned in axisData.
armnn::IConnectableLayer * AddFullyConnectedLayer(const tensorflow::NodeDef &matMulNodeDef, const tensorflow::NodeDef *addNodeDef, const char *armnnLayerName)
const TensorShape & GetShape() const
unsigned int GetNumBytes() const
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
ParsedTfOperationPtr ParseReshape(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
ParsedTfOperationPtr ParseFusedBatchNorm(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
unsigned int CheckPaddingTensor(const ConstTensor &paddingTensor, const TensorInfo &inputTensorInfo, const std::string &nodeName)
friend class ParsedTfOperation
Main network class which provides the interface for building up a neural network. ...
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
armnn::INetworkPtr m_Network
The network we're building. Gets cleared after it is passed to the user.
int32_t m_BeginMask
Begin mask value.
std::unordered_map< std::string, ParsedTfOperationPtr > m_ParsedTfOperations
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
std::vector< std::string > m_RequestedOutputs
const armnn::PermutationVector NHWCToArmNN
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
uint32_t m_DilationY
Dilation factor value for height dimension.
ParsedTfOperationPtr ParseIdentity(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
armnn::IConnectableLayer * CreateAdditionLayer(const tensorflow::NodeDef &nodeDef, armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, const std::string &layerName)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
unsigned int GetHeightIndex() const
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
void SetShape(const TensorShape &newShape)
A ResizeDescriptor for the ResizeLayer.
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
Maps input layer names to their corresponding ids and tensor info.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
TensorShape m_TargetShape
Target shape value.
armnn::INetworkPtr CreateNetworkFromGraphDef(const tensorflow::GraphDef &graphDef, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a GraphDef loaded into memory from one of the other CreateNetwork*.
bool IsSupportedLeakyReluPattern(const tensorflow::NodeDef &mulNodeDef, size_t alphaLayerIndex, const OutputOfParsedTfOperation &otherOp, armnn::IOutputSlot **outputOfLeakyRelu, armnn::ActivationDescriptor &desc)
std::unique_ptr< ParsedTfOperation > ParsedTfOperationPtr
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo, std::int32_t expandDim)
ParsedTfOperationPtr ParseMatMul(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
A PadDescriptor for the PadLayer.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
WithOutputTensorIndex< const tensorflow::NodeDef * > OutputOfConstNodeDef
ParsedTfOperationPtr ParseStridedSlice(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
friend struct MakeTfOperation
std::pair< armnn::IOutputSlot *, armnn::IOutputSlot * > ProcessElementwiseInputSlots(const tensorflow::NodeDef &nodeDef, const std::string &layerName)
ParsedTfOperationPtr ParseLrn(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseDepthwiseConv2D(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseRelu6(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
armnn::INetworkPtr CreateNetworkFromString(const char *protoText, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Creates the network directly from protobuf text in a string. Useful for debugging/testing.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
int32_t m_NewAxisMask
New axis mask value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
An output connection slot for a layer.
BindingPointInfo GetNetworkOutputBindingInfo(const std::string &name) const
Retrieves binding info (layer id and tensor info) for the network output identified by the given laye...
#define TF_PARSER_VERSION
TF_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version...
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
ParsedTfOperationPtr ProcessElementwiseLayer(armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
bool m_BiasEnabled
Enable/disable bias.
WithOutputTensorIndex wraps a value and an index.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
friend class ParsedMatMulTfOperation
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
ParsedTfOperationPtr ParseGreater(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t dilation, uint32_t &paddingFront, uint32_t &paddingBack, bool samePadding)
#define ARMNN_ASSERT(COND)
ParsedTfOperationPtr ParseAdd(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseConst(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseSoftplus(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::vector< int > m_Stride
Stride values for the input that will be sliced.
ParsedTfOperationPtr AddRealDivLayer(const tensorflow::NodeDef &nodeDef)
An ActivationDescriptor for the ActivationLayer.
ParsedTfOperationPtr ParseSoftmax(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseMinimum(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
void SetDataType(DataType type)
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_TargetHeight
Target height value.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
ParsedTfOperationPtr ParseRsqrt(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseAvgPool(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CalculateStridedSliceOutputTensorInfo(const armnn::TensorInfo &inputTensorInfo, const armnn::StridedSliceDescriptor &desc, armnn::TensorInfo &outputTensorInfo)
Create output tensor info for a StridedSlice operator.
std::vector< int > m_End
End values for the input that will be sliced.
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType, const tensorflow::NodeDef &nodeDef)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
uint32_t m_DilationX
Dilation along x axis.
ParsedTfOperationPtr ParseConcat(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE)
ParsedTfOperationPtr ParseSplit(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseAddN(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseResizeBilinear(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
IConnectableLayer * AddReshapeLayer(const ReshapeDescriptor &reshapeDescriptor, const char *name=nullptr)
Adds a reshape layer to the network.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
static const std::list< std::string > m_ControlInputs
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
ParsedTfOperationPtr ParseRelu(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseConv2D(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Creates the network from a protobuf binary file on the disk.
Parses a directed acyclic graph from a tensorflow protobuf file.
ParsedTfOperationPtr ParseSqueeze(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
ParsedTfOperationPtr AddAdditionLayer(const tensorflow::NodeDef &nodeDef, bool isBiasAdd=false)
unsigned int GetConstInputIndex(const std::vector< OutputOfParsedTfOperation > &inputs)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
ParsedTfOperationPtr ParseShape(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::vector< OutputOfParsedTfOperation > GetInputParsedTfOperationsChecked(const tensorflow::NodeDef &nodeDef, std::size_t expectedNumInputs)
Finds the IParsedTfOperations for the nodes connected as inputs of the given node in the graph...
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
ParsedTfOperationPtr ParseExpandDims(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
ParsedTfOperationPtr ParseBiasAdd(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
std::vector< OutputOfConstNodeDef > GetTfInputNodes(const tensorflow::NodeDef &nodeDef) const
Finds the nodes connected as inputs of the given node in the graph.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
ParsedTfOperationPtr ParsePad(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::unordered_map< std::string, const tensorflow::NodeDef * > m_NodesByName
Map of nodes extracted from the GraphDef to speed up parsing.
const char * GetName() const override
Returns the name of the layer.
virtual const char * GetName() const =0
Returns the name of the layer.
static const std::string GetVersion()
Retrieve version in X.Y.Z form.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
ParsedTfOperationPtr ParseEqual(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING)
static const std::map< std::string, OperationParsingFunction > ms_OperationNameToParsingFunctions
Map of TensorFlow operation names to parsing member functions.
bool HasParsedConstTensor(const std::string &nodeName) const
Checks if there is a pre-parsed const tensor available with the given name and Type.
virtual int Connect(IInputSlot &destination)=0
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
A Pooling2dDescriptor for the Pooling2dLayer.
ParsedTfOperationPtr AddActivationLayer(const tensorflow::NodeDef &nodeDef, armnn::ActivationDescriptor &desc)
A NormalizationDescriptor for the NormalizationLayer.
ParsedTfOperationPtr ParseSub(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ParseMaxPool(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
ParsedTfOperationPtr ProcessComparisonLayer(armnn::IOutputSlot *input0Slot, armnn::IOutputSlot *input1Slot, armnn::IConnectableLayer *const layer, const tensorflow::NodeDef &nodeDef)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
friend class ParsedMulTfOperation
ParsedTfOperationPtr ParseSigmoid(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
void TrackOutputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo(const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
ParsedTfOperationPtr ParseGather(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
const armnn::PermutationVector ArmNNToNHWC
unsigned int GetNumDimensions() const
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
A SoftmaxDescriptor for the SoftmaxLayer.
friend class DeferredSingleLayerParsedTfOperation
float m_Beta
Beta value for the normalization equation.
uint32_t m_NormSize
Depth radius value.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
ParsedTfOperationPtr ParseRealDiv(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
Maps output layer names to their corresponding ids and tensor info.
void LoadNodeDef(const tensorflow::NodeDef &nodeDef, const tensorflow::GraphDef &graphDef)
Parses a given node, assuming nodes before it in the graph have been done.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
constexpr unsigned int GetDataTypeSize(DataType dataType)
TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo &inputTensorInfo, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
uint32_t m_PadRight
Padding right value in the width dimension.
ParsedTfOperationPtr AddMaximumLayer(const tensorflow::NodeDef &nodeDef)
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef &nodeDef, TensorInfo inputTensorInfo)