14 #include <fmt/format.h> 18 using namespace armnn;
19 namespace fb = flatbuffers;
25 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
29 ISerializer::~ISerializer() =
default;
48 pSerializerImpl->Serialize(inNetwork);
53 return pSerializerImpl->SaveSerializedToStream(stream);
101 if (m_guidMap.empty())
103 m_guidMap.insert(std::make_pair(guid, m_layerId));
105 else if (m_guidMap.find(guid) == m_guidMap.end())
108 m_guidMap.insert(std::make_pair(guid, m_layerId));
112 return m_guidMap[guid];
125 flatBufferInputBaseLayer,
128 m_inputIds.push_back(
id);
148 flatBufferOutputBaseLayer,
151 m_outputIds.push_back(
id);
187 flatBufferDescriptor);
226 flatBufferDescriptor);
241 std::vector<unsigned int> crops;
242 crops.reserve(descriptor.
m_Crops.size() * 2);
243 for (
auto& crop : descriptor.
m_Crops)
245 crops.push_back(crop.first);
246 crops.push_back(crop.second);
249 auto flatBufferDescriptor =
251 m_flatBufferBuilder.CreateVector(descriptor.
m_BlockShape),
252 m_flatBufferBuilder.CreateVector(crops),
257 flatBufferDescriptor);
262 void SerializerStrategy::SerializeBatchNormalizationLayer(
265 const std::vector<armnn::ConstTensor>& constants,
278 batchNormDescriptor.
m_Eps,
281 auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
282 auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
283 auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
284 auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
286 fbBatchNormalizationBaseLayer,
287 fbBatchNormalizationDescriptor,
288 fbMeanConstTensorInfo,
289 fbVarianceConstTensorInfo,
290 fbBetaConstTensorInfo,
291 fbGammaConstTensorInfo);
313 const std::vector<armnn::ConstTensor>& constants,
323 auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
327 flatBufferConstantBaseLayer,
328 flatBufferConstTensorInfo);
337 const std::vector<armnn::ConstTensor>& constants,
358 auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
359 flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
361 if (constants.size() > 1)
364 flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
370 flatBufferDescriptor,
371 flatBufferWeightsConstTensorInfo,
372 flatBufferBiasesConstTensorInfo);
396 const std::vector<armnn::ConstTensor>& constants,
416 flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
417 flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
419 if (constants.size() > 1)
422 fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
428 fbWeightsConstTensorInfo,
429 fbBiasesConstTensorInfo);
447 const std::vector<armnn::ConstTensor>& constants,
468 flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
473 fbAnchorsConstTensorInfo);
563 void SerializerStrategy::SerializeInstanceNormalizationLayer(
572 instanceNormalizationDescriptor.
m_Gamma,
573 instanceNormalizationDescriptor.
m_Beta,
574 instanceNormalizationDescriptor.
m_Eps,
596 l2NormalizationDescriptor.
m_Eps);
629 auto flatBufferLogSoftmaxDesc =
631 logSoftmaxDescriptor.
m_Beta,
632 logSoftmaxDescriptor.
m_Axis);
635 auto flatBufferLogSoftmaxLayer =
637 flatBufferLogSoftmaxBaseLayer,
638 flatBufferLogSoftmaxDesc);
645 const std::vector<armnn::ConstTensor>& constants,
666 auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
667 auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
668 auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
669 auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
670 auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
671 auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
672 auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
673 auto cellBias = CreateConstTensorInfo(constants[i++]);
674 auto outputGateBias = CreateConstTensorInfo(constants[i++]);
679 flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
680 flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
681 flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
682 flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
683 flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
684 flatbuffers::Offset<serializer::ConstTensor> projectionBias;
685 flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
686 flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
687 flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
688 flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
689 flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
690 flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
694 inputToInputWeights = CreateConstTensorInfo(constants[i++]);
695 recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
696 inputGateBias = CreateConstTensorInfo(constants[i++]);
703 cellToInputWeights = CreateConstTensorInfo(constants[i++]);
705 cellToForgetWeights = CreateConstTensorInfo(constants[i++]);
706 cellToOutputWeights = CreateConstTensorInfo(constants[i++]);
711 projectionWeights = CreateConstTensorInfo(constants[i++]);
712 projectionBias = CreateConstTensorInfo(constants[i++]);
719 inputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
721 forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]);
722 cellLayerNormWeights = CreateConstTensorInfo(constants[i++]);
723 outputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
728 inputToForgetWeights,
730 inputToOutputWeights,
731 recurrentToForgetWeights,
732 recurrentToCellWeights,
733 recurrentToOutputWeights,
738 recurrentToInputWeights,
745 inputLayerNormWeights,
746 forgetLayerNormWeights,
747 cellLayerNormWeights,
748 outputLayerNormWeights);
777 m_flatBufferBuilder.CreateVector(descriptor.
m_Axis),
811 SerializeConcatLayer(layer, mergerDescriptor, name);
822 std::vector<flatbuffers::Offset<UintVector>> views;
823 for (
unsigned int v = 0; v < concatDescriptor.
GetNumViews(); ++v)
826 std::vector<uint32_t> origins;
829 origins.push_back(origin[d]);
831 auto view = m_flatBufferBuilder.CreateVector(origins);
833 views.push_back(uintVector);
840 m_flatBufferBuilder.CreateVector(views));
843 flatBufferConcatBaseLayer,
844 flatBufferConcatDescriptor);
855 fbMultiplicationBaseLayer);
868 std::vector<unsigned int> padList;
871 padList.push_back(p.first);
872 padList.push_back(p.second);
876 m_flatBufferBuilder.CreateVector(padList),
895 std::vector<unsigned int> dimMappings;
902 m_flatBufferBuilder.CreateVector(dimMappings));
906 flatBufferPermuteBaseLayer,
907 flatBufferPermuteDesc);
931 m_flatBufferBuilder.CreateVector(reduceDescriptor.
m_vAxis),
950 std::vector<unsigned int> targetShape;
957 m_flatBufferBuilder.CreateVector(targetShape));
961 flatBufferReshapeDesc);
975 auto flatBufferDescriptor =
985 flatBufferDescriptor);
998 auto flatBufferDescriptor =
1008 flatBufferBaseLayer,
1009 flatBufferDescriptor);
1032 m_flatBufferBuilder.CreateVector(sliceDescriptor.
m_Begin),
1033 m_flatBufferBuilder.CreateVector(sliceDescriptor.
m_Size));
1051 auto flatBufferSoftmaxDesc =
1055 auto flatBufferSoftmaxLayer =
1057 flatBufferSoftmaxBaseLayer,
1058 flatBufferSoftmaxDesc);
1071 m_flatBufferBuilder,
1086 fbPooling2dBaseLayer,
1087 fbPooling2dDescriptor);
1113 fbQuantizeBaseLayer);
1120 const std::vector<armnn::ConstTensor>& constants,
1131 auto flatBufferDescriptor =
1137 auto flatBufferWeights = CreateConstTensorInfo(weights);
1140 flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
1144 flatBufferBiases = CreateConstTensorInfo(biases);
1149 flatBufferBaseLayer,
1150 flatBufferDescriptor,
1168 std::vector<unsigned int> padList;
1169 padList.reserve(spaceToBatchNdDescriptor.
m_PadList.size()*2);
1170 for (
auto& pad : spaceToBatchNdDescriptor.
m_PadList)
1172 padList.push_back(pad.first);
1173 padList.push_back(pad.second);
1176 auto flatBufferDescriptor =
1178 m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.
m_BlockShape),
1179 m_flatBufferBuilder.CreateVector(padList),
1183 flatBufferBaseLayer,
1184 flatBufferDescriptor);
1197 auto flatBufferDescriptor =
1203 flatBufferBaseLayer,
1204 flatBufferDescriptor);
1217 std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1218 flatBufferViewOrigins.reserve(viewsDescriptor.
GetNumViews());
1220 for(
unsigned int vIdx = 0; vIdx < viewsDescriptor.
GetNumViews(); ++vIdx)
1222 std::vector<uint32_t> viewOrigin;
1226 for(
unsigned int dIdx = 0; dIdx < viewsDescriptor.
GetNumDimensions(); ++dIdx)
1228 viewOrigin.push_back(viewsDescriptor.
GetViewOrigin(vIdx)[dIdx]);
1232 m_flatBufferBuilder.CreateVector(viewOrigin)));
1240 m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1243 std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1244 flatBufferViewSizes.reserve(viewsDescriptor.
GetNumViews());
1246 for(
unsigned int vIdx = 0; vIdx < viewsDescriptor.
GetNumViews(); ++vIdx)
1248 std::vector<uint32_t> viewSize;
1252 for(
unsigned int dIdx = 0; dIdx < viewsDescriptor.
GetNumDimensions(); ++dIdx)
1254 viewSize.push_back(viewsDescriptor.
GetViewSizes(vIdx)[dIdx]);
1258 m_flatBufferBuilder.CreateVector(viewSize)));
1263 flatBufferOriginDescriptor,
1264 m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1270 flatBufferBaseLayer,
1271 flatBufferViewsDescriptor);
1285 m_flatBufferBuilder,
1295 fbNormalizationBaseLayer,
1296 fbNormalizationDescriptor);
1309 std::vector<unsigned int> inputShape;
1318 m_flatBufferBuilder.CreateVector(inputShape));
1348 auto flatBufferDescriptor =
1350 m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.
m_Begin),
1351 m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.
m_End),
1352 m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.
m_Stride),
1361 flatBufferBaseLayer,
1362 flatBufferDescriptor);
1387 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1390 const std::vector<armnn::ConstTensor>& constants,
1409 auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1410 flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1411 if (constants.size() > 1)
1414 fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1420 fbWeightsConstTensorInfo,
1421 fbBiasesConstTensorInfo);
1435 std::vector<unsigned int> dimMappings;
1442 m_flatBufferBuilder.CreateVector(dimMappings));
1446 flatBufferBaseLayer,
1455 const std::vector<armnn::ConstTensor>& constants,
1463 m_flatBufferBuilder,
1482 auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1483 auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1484 auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1485 auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1486 auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1487 auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1488 auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1489 auto cellBias = CreateConstTensorInfo(constants[i++]);
1490 auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1493 flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1494 flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1495 flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1499 inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1500 recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1501 inputGateBias = CreateConstTensorInfo(constants[i++]);
1505 flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1506 flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1507 flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1513 cellToInputWeights = CreateConstTensorInfo(constants[i++]);
1515 cellToForgetWeights = CreateConstTensorInfo(constants[i++]);
1516 cellToOutputWeights = CreateConstTensorInfo(constants[i++]);
1520 flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1521 flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1525 projectionWeights = CreateConstTensorInfo(constants[i++]);
1526 projectionBias = CreateConstTensorInfo(constants[i++]);
1530 flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1531 flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1532 flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1533 flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1539 inputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1541 forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1542 cellLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1543 outputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1547 m_flatBufferBuilder,
1548 inputToForgetWeights,
1550 inputToOutputWeights,
1551 recurrentToForgetWeights,
1552 recurrentToCellWeights,
1553 recurrentToOutputWeights,
1557 inputToInputWeights,
1558 recurrentToInputWeights,
1563 cellToForgetWeights,
1564 cellToOutputWeights,
1565 inputLayerNormWeights,
1566 forgetLayerNormWeights,
1567 cellLayerNormWeights,
1568 outputLayerNormWeights);
1571 m_flatBufferBuilder,
1580 const std::vector<armnn::ConstTensor>& constants,
1591 auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1592 auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1593 auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1594 auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1596 auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1597 auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1598 auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1599 auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1601 auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1602 auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1603 auto cellBias = CreateConstTensorInfo(constants[i++]);
1604 auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1607 m_flatBufferBuilder,
1608 inputToInputWeights,
1609 inputToForgetWeights,
1611 inputToOutputWeights,
1612 recurrentToInputWeights,
1613 recurrentToForgetWeights,
1614 recurrentToCellWeights,
1615 recurrentToOutputWeights,
1622 m_flatBufferBuilder,
1623 fbQuantizedLstmBaseLayer,
1624 fbQuantizedLstmParams);
1629 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(
const IConnectableLayer* layer,
1633 uint32_t fbIndex = GetSerializedId(layer->
GetGuid());
1635 std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1636 std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1640 m_flatBufferBuilder.CreateString(layer->
GetName()),
1642 m_flatBufferBuilder.CreateVector(inputSlots),
1643 m_flatBufferBuilder.CreateVector(outputSlots));
1646 void SerializerStrategy::CreateAnyLayer(
const flatbuffers::Offset<void>& layer,
const serializer::Layer serializerLayer)
1650 m_serializedLayers.push_back(anyLayer);
1653 template <
typename T>
1654 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(
const void* memory,
unsigned int size)
1656 const T* buffer =
reinterpret_cast<const T*
>(memory);
1657 std::vector<T> vector(buffer, buffer + (size /
sizeof(T)));
1658 auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1662 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(
const armnn::TensorInfo& tensorInfo)
1665 std::vector<unsigned int> shape;
1668 shape.push_back(tensorInfo.
GetShape()[dim]);
1671 std::vector<bool> specificity;
1682 auto flatBufferTensorInfo =
1684 m_flatBufferBuilder.CreateVector(shape),
1690 static_cast<unsigned int> 1692 m_flatBufferBuilder.CreateVector(specificity));
1693 return flatBufferTensorInfo;
1698 m_flatBufferBuilder.CreateVector(shape),
1704 static_cast<unsigned int> 1706 m_flatBufferBuilder.CreateVector(specificity));
1707 return flatBufferTensorInfo;
1710 flatbuffers::Offset<serializer::ConstTensor>
1715 flatbuffers::Offset<void> fbPayload;
1724 m_flatBufferBuilder,
1726 fbPayload = flatBuffersData.o;
1735 m_flatBufferBuilder,
1737 fbPayload = flatBuffersData.o;
1748 m_flatBufferBuilder,
1750 fbPayload = flatBuffersData.o;
1754 m_flatBufferBuilder,
1758 return flatBufferConstTensor;
1763 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1765 m_flatBufferBuilder,
1768 return versionsTable;
1771 std::vector<fb::Offset<serializer::InputSlot>>
1774 std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1777 for (
unsigned int slotIndex = 0; slotIndex<layer->
GetNumInputSlots(); ++slotIndex)
1793 std::vector<fb::Offset<serializer::OutputSlot>>
1796 std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1799 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
1814 const std::vector<armnn::ConstTensor>& constants,
1826 SerializeActivationLayer(layer, layerDescriptor, name);
1831 SerializeAdditionLayer(layer, name);
1838 SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1845 SerializeBatchNormalizationLayer(layer,
1855 SerializeBatchToSpaceNdLayer(layer,
1864 SerializeComparisonLayer(layer,
1873 SerializeConcatLayer(layer,
1880 SerializeConstantLayer(layer,
1889 SerializeConvolution2dLayer(layer,
1899 SerializeDepthToSpaceLayer(layer,
1908 SerializeDepthwiseConvolution2dLayer(layer,
1916 SerializeDequantizeLayer(layer,
1924 SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
1929 SerializeDivisionLayer(layer, name);
1936 SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
1943 SerializeFillLayer(layer, layerDescriptor, name);
1948 SerializeFloorLayer(layer, name);
1955 SerializeFullyConnectedLayer(layer, layerDescriptor, constants, name);
1962 SerializeGatherLayer(layer, layerDescriptor, name);
1967 SerializeInputLayer(layer,
id, name);
1974 SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
1981 SerializeL2NormalizationLayer(layer, layerDescriptor, name);
1988 SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
1995 SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2002 SerializeLstmLayer(layer, layerDescriptor, constants, name);
2009 SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2014 SerializeMaximumLayer(layer, name);
2021 SerializeMeanLayer(layer, layerDescriptor, name);
2026 SerializeMergeLayer(layer, name);
2031 SerializeMinimumLayer(layer, name);
2036 SerializeMultiplicationLayer(layer, name);
2043 SerializeNormalizationLayer(layer, layerDescriptor, name);
2048 SerializeOutputLayer(layer,
id, name);
2055 SerializePadLayer(layer, layerDescriptor, name);
2062 SerializePermuteLayer(layer, layerDescriptor, name);
2069 SerializePooling2dLayer(layer, layerDescriptor, name);
2074 SerializePreluLayer(layer, name);
2079 SerializeQuantizeLayer(layer, name);
2083 SerializeQuantizedLstmLayer(layer, constants, name);
2089 SerializeReshapeLayer(layer, layerDescriptor, name);
2094 SerializeRankLayer(layer, name);
2101 SerializeReduceLayer(layer, layerDescriptor, name);
2108 SerializeResizeLayer(layer, layerDescriptor, name);
2115 SerializeSliceLayer(layer, layerDescriptor, name);
2122 SerializeSoftmaxLayer(layer, layerDescriptor, name);
2129 SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2136 SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2143 SerializeSplitterLayer(layer, layerDescriptor, name);
2150 SerializeStackLayer(layer, layerDescriptor, name);
2157 SerializeStandInLayer(layer, layerDescriptor, name);
2164 SerializeStridedSliceLayer(layer, layerDescriptor, name);
2169 SerializeSubtractionLayer(layer, name);
2174 SerializeSwitchLayer(layer, name);
2181 SerializeTransposeLayer(layer, layerDescriptor, name);
2188 SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2194 fmt::format(
"A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2205 flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2210 fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2211 fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2212 fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2213 m_SerializerStrategy.GetVersionTable());
2216 fbBuilder.Finish(serializedGraph);
2222 flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2225 stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2226 return !stream.bad();
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false)
bool m_HalfPixelCenters
Half Pixel Centers.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
bool m_AlignCorners
Aligned corners.
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void ExecuteStrategy(IStrategy &strategy) const
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
flatbuffers::Offset< AbsLayer > CreateAbsLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f)
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
uint32_t m_TargetWidth
Target width value.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Dimensionality GetDimensionality() const
Function that returns the tensor type.
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Main network class which provides the interface for building up a neural network. ...
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0)
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
flatbuffers::Offset< GreaterLayer > CreateGreaterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
An output connection slot for a layer.
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
An ArgMinMaxDescriptor for ArgMinMaxLayer.
float GetQuantizationScale() const
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f)
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
flatbuffers::Offset< RsqrtLayer > CreateRsqrtLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
min(a, max(b, input)) — used for ReLu1 and ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_TargetHeight
Target height value.
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
flatbuffers::Offset< ResizeBilinearDescriptor > CreateResizeBilinearDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetWidth=0, uint32_t targetHeight=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< EqualLayer > CreateEqualLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
bool m_ProjectionEnabled
Enable/disable the projection layer.
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
static ISerializerPtr Create()
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ResizeBilinearLayer > CreateResizeBilinearLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeBilinearDescriptor > descriptor=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
static void Destroy(ISerializer *serializer)
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts (e.g. NCHW vs NHWC).
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })