14 #include <fmt/format.h> 17 using namespace armnn;
18 namespace fb = flatbuffers;
24 ISerializer::ISerializer() : pSerializerImpl(new SerializerImpl())
28 ISerializer::~ISerializer() =
default;
47 pSerializerImpl->Serialize(inNetwork);
52 return pSerializerImpl->SaveSerializedToStream(stream);
100 if (m_guidMap.empty())
102 m_guidMap.insert(std::make_pair(guid, m_layerId));
104 else if (m_guidMap.find(guid) == m_guidMap.end())
107 m_guidMap.insert(std::make_pair(guid, m_layerId));
111 return m_guidMap[guid];
124 flatBufferInputBaseLayer,
127 m_inputIds.push_back(
id);
147 flatBufferOutputBaseLayer,
150 m_outputIds.push_back(
id);
186 flatBufferDescriptor);
225 flatBufferDescriptor);
240 std::vector<unsigned int> crops;
241 crops.reserve(descriptor.
m_Crops.size() * 2);
242 for (
auto& crop : descriptor.
m_Crops)
244 crops.push_back(crop.first);
245 crops.push_back(crop.second);
248 auto flatBufferDescriptor =
250 m_flatBufferBuilder.CreateVector(descriptor.
m_BlockShape),
251 m_flatBufferBuilder.CreateVector(crops),
256 flatBufferDescriptor);
261 void SerializerStrategy::SerializeBatchNormalizationLayer(
264 const std::vector<armnn::ConstTensor>& constants,
277 batchNormDescriptor.
m_Eps,
280 auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
281 auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
282 auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
283 auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
285 fbBatchNormalizationBaseLayer,
286 fbBatchNormalizationDescriptor,
287 fbMeanConstTensorInfo,
288 fbVarianceConstTensorInfo,
289 fbBetaConstTensorInfo,
290 fbGammaConstTensorInfo);
322 const std::vector<armnn::ConstTensor>& constants,
332 auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
336 flatBufferConstantBaseLayer,
337 flatBufferConstTensorInfo);
346 const std::vector<armnn::ConstTensor>& constants,
367 auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
368 flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
370 if (constants.size() > 1)
373 flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
379 flatBufferDescriptor,
380 flatBufferWeightsConstTensorInfo,
381 flatBufferBiasesConstTensorInfo);
405 const std::vector<armnn::ConstTensor>& constants,
425 flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
426 flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
428 if (constants.size() > 1)
431 fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
437 fbWeightsConstTensorInfo,
438 fbBiasesConstTensorInfo);
456 const std::vector<armnn::ConstTensor>& constants,
477 flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
482 fbAnchorsConstTensorInfo);
572 void SerializerStrategy::SerializeInstanceNormalizationLayer(
581 instanceNormalizationDescriptor.
m_Gamma,
582 instanceNormalizationDescriptor.
m_Beta,
583 instanceNormalizationDescriptor.
m_Eps,
605 l2NormalizationDescriptor.
m_Eps);
638 auto flatBufferLogSoftmaxDesc =
640 logSoftmaxDescriptor.
m_Beta,
641 logSoftmaxDescriptor.
m_Axis);
644 auto flatBufferLogSoftmaxLayer =
646 flatBufferLogSoftmaxBaseLayer,
647 flatBufferLogSoftmaxDesc);
654 const std::vector<armnn::ConstTensor>& constants,
675 auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
676 auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
677 auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
678 auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
679 auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
680 auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
681 auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
682 auto cellBias = CreateConstTensorInfo(constants[i++]);
683 auto outputGateBias = CreateConstTensorInfo(constants[i++]);
688 flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
689 flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
690 flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
691 flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
692 flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
693 flatbuffers::Offset<serializer::ConstTensor> projectionBias;
694 flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
695 flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
696 flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
697 flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
698 flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
699 flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
703 inputToInputWeights = CreateConstTensorInfo(constants[i++]);
704 recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
705 inputGateBias = CreateConstTensorInfo(constants[i++]);
712 cellToInputWeights = CreateConstTensorInfo(constants[i++]);
714 cellToForgetWeights = CreateConstTensorInfo(constants[i++]);
715 cellToOutputWeights = CreateConstTensorInfo(constants[i++]);
720 projectionWeights = CreateConstTensorInfo(constants[i++]);
721 projectionBias = CreateConstTensorInfo(constants[i++]);
728 inputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
730 forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]);
731 cellLayerNormWeights = CreateConstTensorInfo(constants[i++]);
732 outputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
737 inputToForgetWeights,
739 inputToOutputWeights,
740 recurrentToForgetWeights,
741 recurrentToCellWeights,
742 recurrentToOutputWeights,
747 recurrentToInputWeights,
754 inputLayerNormWeights,
755 forgetLayerNormWeights,
756 cellLayerNormWeights,
757 outputLayerNormWeights);
786 m_flatBufferBuilder.CreateVector(descriptor.
m_Axis),
820 SerializeConcatLayer(layer, mergerDescriptor, name);
831 std::vector<flatbuffers::Offset<UintVector>> views;
832 for (
unsigned int v = 0; v < concatDescriptor.
GetNumViews(); ++v)
835 std::vector<uint32_t> origins;
838 origins.push_back(origin[d]);
840 auto view = m_flatBufferBuilder.CreateVector(origins);
842 views.push_back(uintVector);
849 m_flatBufferBuilder.CreateVector(views));
852 flatBufferConcatBaseLayer,
853 flatBufferConcatDescriptor);
864 fbMultiplicationBaseLayer);
877 std::vector<unsigned int> padList;
880 padList.push_back(p.first);
881 padList.push_back(p.second);
885 m_flatBufferBuilder.CreateVector(padList),
904 std::vector<unsigned int> dimMappings;
911 m_flatBufferBuilder.CreateVector(dimMappings));
915 flatBufferPermuteBaseLayer,
916 flatBufferPermuteDesc);
940 m_flatBufferBuilder.CreateVector(reduceDescriptor.
m_vAxis),
959 std::vector<unsigned int> targetShape;
966 m_flatBufferBuilder.CreateVector(targetShape));
970 flatBufferReshapeDesc);
984 auto flatBufferDescriptor =
994 flatBufferDescriptor);
1007 auto flatBufferDescriptor =
1017 flatBufferBaseLayer,
1018 flatBufferDescriptor);
1041 m_flatBufferBuilder.CreateVector(sliceDescriptor.
m_Begin),
1042 m_flatBufferBuilder.CreateVector(sliceDescriptor.
m_Size));
1060 auto flatBufferSoftmaxDesc =
1064 auto flatBufferSoftmaxLayer =
1066 flatBufferSoftmaxBaseLayer,
1067 flatBufferSoftmaxDesc);
1080 m_flatBufferBuilder,
1095 fbPooling2dBaseLayer,
1096 fbPooling2dDescriptor);
1122 fbQuantizeBaseLayer);
1129 const std::vector<armnn::ConstTensor>& constants,
1136 auto flatBufferDescriptor =
1143 flatbuffers::Offset<serializer::ConstTensor> flatBufferWeights;
1145 flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
1149 flatBufferWeights = CreateConstTensorInfo(weights);
1154 flatBufferBiases = CreateConstTensorInfo(biases);
1160 flatBufferBaseLayer,
1161 flatBufferDescriptor,
1179 std::vector<unsigned int> padList;
1180 padList.reserve(spaceToBatchNdDescriptor.
m_PadList.size()*2);
1181 for (
auto& pad : spaceToBatchNdDescriptor.
m_PadList)
1183 padList.push_back(pad.first);
1184 padList.push_back(pad.second);
1187 auto flatBufferDescriptor =
1189 m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.
m_BlockShape),
1190 m_flatBufferBuilder.CreateVector(padList),
1194 flatBufferBaseLayer,
1195 flatBufferDescriptor);
1208 auto flatBufferDescriptor =
1214 flatBufferBaseLayer,
1215 flatBufferDescriptor);
1228 std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1229 flatBufferViewOrigins.reserve(viewsDescriptor.
GetNumViews());
1231 for(
unsigned int vIdx = 0; vIdx < viewsDescriptor.
GetNumViews(); ++vIdx)
1233 std::vector<uint32_t> viewOrigin;
1237 for(
unsigned int dIdx = 0; dIdx < viewsDescriptor.
GetNumDimensions(); ++dIdx)
1239 viewOrigin.push_back(viewsDescriptor.
GetViewOrigin(vIdx)[dIdx]);
1243 m_flatBufferBuilder.CreateVector(viewOrigin)));
1251 m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1254 std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1255 flatBufferViewSizes.reserve(viewsDescriptor.
GetNumViews());
1257 for(
unsigned int vIdx = 0; vIdx < viewsDescriptor.
GetNumViews(); ++vIdx)
1259 std::vector<uint32_t> viewSize;
1263 for(
unsigned int dIdx = 0; dIdx < viewsDescriptor.
GetNumDimensions(); ++dIdx)
1265 viewSize.push_back(viewsDescriptor.
GetViewSizes(vIdx)[dIdx]);
1269 m_flatBufferBuilder.CreateVector(viewSize)));
1274 flatBufferOriginDescriptor,
1275 m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1281 flatBufferBaseLayer,
1282 flatBufferViewsDescriptor);
1296 m_flatBufferBuilder,
1306 fbNormalizationBaseLayer,
1307 fbNormalizationDescriptor);
1320 std::vector<unsigned int> inputShape;
1329 m_flatBufferBuilder.CreateVector(inputShape));
1359 auto flatBufferDescriptor =
1361 m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.
m_Begin),
1362 m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.
m_End),
1363 m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.
m_Stride),
1372 flatBufferBaseLayer,
1373 flatBufferDescriptor);
1398 void SerializerStrategy::SerializeTransposeConvolution2dLayer(
1401 const std::vector<armnn::ConstTensor>& constants,
1420 auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1421 flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1422 if (constants.size() > 1)
1425 fbBiasesConstTensorInfo = CreateConstTensorInfo(biases);
1431 fbWeightsConstTensorInfo,
1432 fbBiasesConstTensorInfo);
1446 std::vector<unsigned int> dimMappings;
1453 m_flatBufferBuilder.CreateVector(dimMappings));
1457 flatBufferBaseLayer,
1466 const std::vector<armnn::ConstTensor>& constants,
1474 m_flatBufferBuilder,
1493 auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1494 auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1495 auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1496 auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1497 auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1498 auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1499 auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1500 auto cellBias = CreateConstTensorInfo(constants[i++]);
1501 auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1504 flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1505 flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1506 flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1510 inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1511 recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1512 inputGateBias = CreateConstTensorInfo(constants[i++]);
1516 flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1517 flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1518 flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1524 cellToInputWeights = CreateConstTensorInfo(constants[i++]);
1526 cellToForgetWeights = CreateConstTensorInfo(constants[i++]);
1527 cellToOutputWeights = CreateConstTensorInfo(constants[i++]);
1531 flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1532 flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1536 projectionWeights = CreateConstTensorInfo(constants[i++]);
1537 projectionBias = CreateConstTensorInfo(constants[i++]);
1541 flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1542 flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1543 flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1544 flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1550 inputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1552 forgetLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1553 cellLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1554 outputLayerNormWeights = CreateConstTensorInfo(constants[i++]);
1558 m_flatBufferBuilder,
1559 inputToForgetWeights,
1561 inputToOutputWeights,
1562 recurrentToForgetWeights,
1563 recurrentToCellWeights,
1564 recurrentToOutputWeights,
1568 inputToInputWeights,
1569 recurrentToInputWeights,
1574 cellToForgetWeights,
1575 cellToOutputWeights,
1576 inputLayerNormWeights,
1577 forgetLayerNormWeights,
1578 cellLayerNormWeights,
1579 outputLayerNormWeights);
1582 m_flatBufferBuilder,
1591 const std::vector<armnn::ConstTensor>& constants,
1602 auto inputToInputWeights = CreateConstTensorInfo(constants[i++]);
1603 auto inputToForgetWeights = CreateConstTensorInfo(constants[i++]);
1604 auto inputToCellWeights = CreateConstTensorInfo(constants[i++]);
1605 auto inputToOutputWeights = CreateConstTensorInfo(constants[i++]);
1607 auto recurrentToInputWeights = CreateConstTensorInfo(constants[i++]);
1608 auto recurrentToForgetWeights = CreateConstTensorInfo(constants[i++]);
1609 auto recurrentToCellWeights = CreateConstTensorInfo(constants[i++]);
1610 auto recurrentToOutputWeights = CreateConstTensorInfo(constants[i++]);
1612 auto inputGateBias = CreateConstTensorInfo(constants[i++]);
1613 auto forgetGateBias = CreateConstTensorInfo(constants[i++]);
1614 auto cellBias = CreateConstTensorInfo(constants[i++]);
1615 auto outputGateBias = CreateConstTensorInfo(constants[i++]);
1618 m_flatBufferBuilder,
1619 inputToInputWeights,
1620 inputToForgetWeights,
1622 inputToOutputWeights,
1623 recurrentToInputWeights,
1624 recurrentToForgetWeights,
1625 recurrentToCellWeights,
1626 recurrentToOutputWeights,
1633 m_flatBufferBuilder,
1634 fbQuantizedLstmBaseLayer,
1635 fbQuantizedLstmParams);
1640 fb::Offset<serializer::LayerBase> SerializerStrategy::CreateLayerBase(
const IConnectableLayer* layer,
1644 uint32_t fbIndex = GetSerializedId(layer->
GetGuid());
1646 std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1647 std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1651 m_flatBufferBuilder.CreateString(layer->
GetName()),
1653 m_flatBufferBuilder.CreateVector(inputSlots),
1654 m_flatBufferBuilder.CreateVector(outputSlots));
1657 void SerializerStrategy::CreateAnyLayer(
const flatbuffers::Offset<void>& layer,
const serializer::Layer serializerLayer)
1661 m_serializedLayers.push_back(anyLayer);
1664 template <
typename T>
1665 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerStrategy::CreateDataVector(
const void* memory,
unsigned int size)
1667 const T* buffer =
reinterpret_cast<const T*
>(memory);
1668 std::vector<T> vector(buffer, buffer + (size /
sizeof(T)));
1669 auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1673 flatbuffers::Offset<TensorInfo> SerializerStrategy::CreateTensorInfo(
const armnn::TensorInfo& tensorInfo)
1676 std::vector<unsigned int> shape;
1677 std::vector<bool> specificity;
1686 shape.push_back(tensorInfo.
GetShape()[dim]);
1697 auto flatBufferTensorInfo =
1699 m_flatBufferBuilder.CreateVector(shape),
1705 static_cast<unsigned int> 1707 m_flatBufferBuilder.CreateVector(specificity));
1708 return flatBufferTensorInfo;
1713 m_flatBufferBuilder.CreateVector(shape),
1719 static_cast<unsigned int> 1721 m_flatBufferBuilder.CreateVector(specificity));
1722 return flatBufferTensorInfo;
1725 flatbuffers::Offset<serializer::ConstTensor>
1730 flatbuffers::Offset<void> fbPayload;
1738 m_flatBufferBuilder,
1740 fbPayload = flatBuffersData.o;
1748 m_flatBufferBuilder,
1750 fbPayload = flatBuffersData.o;
1759 m_flatBufferBuilder,
1761 fbPayload = flatBuffersData.o;
1772 m_flatBufferBuilder,
1774 fbPayload = flatBuffersData.o;
1778 m_flatBufferBuilder,
1782 return flatBufferConstTensor;
1787 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1789 m_flatBufferBuilder,
1792 return versionsTable;
1795 std::vector<fb::Offset<serializer::InputSlot>>
1798 std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1801 for (
unsigned int slotIndex = 0; slotIndex<layer->
GetNumInputSlots(); ++slotIndex)
1817 std::vector<fb::Offset<serializer::OutputSlot>>
1820 std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1823 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
1838 const std::vector<armnn::ConstTensor>& constants,
1850 SerializeActivationLayer(layer, layerDescriptor, name);
1855 SerializeAdditionLayer(layer, name);
1862 SerializeArgMinMaxLayer(layer, layerDescriptor, name);
1869 SerializeBatchNormalizationLayer(layer,
1879 SerializeBatchToSpaceNdLayer(layer,
1886 SerializeCastLayer(layer, name);
1893 SerializeComparisonLayer(layer,
1902 SerializeConcatLayer(layer,
1909 SerializeConstantLayer(layer,
1918 SerializeConvolution2dLayer(layer,
1928 SerializeDepthToSpaceLayer(layer,
1937 SerializeDepthwiseConvolution2dLayer(layer,
1945 SerializeDequantizeLayer(layer,
1953 SerializeDetectionPostProcessLayer(layer, layerDescriptor, constants, name);
1958 SerializeDivisionLayer(layer, name);
1965 SerializeElementwiseUnaryLayer(layer, layerDescriptor, name);
1972 SerializeFillLayer(layer, layerDescriptor, name);
1977 SerializeFloorLayer(layer, name);
1984 SerializeFullyConnectedLayer(layer, layerDescriptor, constants, name);
1991 SerializeGatherLayer(layer, layerDescriptor, name);
1996 SerializeInputLayer(layer,
id, name);
2003 SerializeInstanceNormalizationLayer(layer, layerDescriptor, name);
2010 SerializeL2NormalizationLayer(layer, layerDescriptor, name);
2017 SerializeLogicalBinaryLayer(layer, layerDescriptor, name);
2024 SerializeLogSoftmaxLayer(layer, layerDescriptor, name);
2031 SerializeLstmLayer(layer, layerDescriptor, constants, name);
2038 SerializeQLstmLayer(layer, layerDescriptor, constants, name);
2043 SerializeMaximumLayer(layer, name);
2050 SerializeMeanLayer(layer, layerDescriptor, name);
2055 SerializeMergeLayer(layer, name);
2060 SerializeMinimumLayer(layer, name);
2065 SerializeMultiplicationLayer(layer, name);
2072 SerializeNormalizationLayer(layer, layerDescriptor, name);
2077 SerializeOutputLayer(layer,
id, name);
2084 SerializePadLayer(layer, layerDescriptor, name);
2091 SerializePermuteLayer(layer, layerDescriptor, name);
2098 SerializePooling2dLayer(layer, layerDescriptor, name);
2103 SerializePreluLayer(layer, name);
2108 SerializeQuantizeLayer(layer, name);
2112 SerializeQuantizedLstmLayer(layer, constants, name);
2118 SerializeReshapeLayer(layer, layerDescriptor, name);
2123 SerializeRankLayer(layer, name);
2130 SerializeReduceLayer(layer, layerDescriptor, name);
2137 SerializeResizeLayer(layer, layerDescriptor, name);
2144 SerializeSliceLayer(layer, layerDescriptor, name);
2151 SerializeSoftmaxLayer(layer, layerDescriptor, name);
2158 SerializeSpaceToBatchNdLayer(layer, layerDescriptor, name);
2165 SerializeSpaceToDepthLayer(layer, layerDescriptor, name);
2172 SerializeSplitterLayer(layer, layerDescriptor, name);
2179 SerializeStackLayer(layer, layerDescriptor, name);
2186 SerializeStandInLayer(layer, layerDescriptor, name);
2193 SerializeStridedSliceLayer(layer, layerDescriptor, name);
2198 SerializeSubtractionLayer(layer, name);
2203 SerializeSwitchLayer(layer, name);
2210 SerializeTransposeLayer(layer, layerDescriptor, name);
2217 SerializeTransposeConvolution2dLayer(layer, layerDescriptor, constants, name);
2223 fmt::format(
"A layer of unknown type was given to the serializer. Layer name: {}; Layer Id: {}",
2234 flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2239 fbBuilder.CreateVector(m_SerializerStrategy.GetSerializedLayers()),
2240 fbBuilder.CreateVector(m_SerializerStrategy.GetInputIds()),
2241 fbBuilder.CreateVector(m_SerializerStrategy.GetOutputIds()),
2242 m_SerializerStrategy.GetVersionTable());
2245 fbBuilder.Finish(serializedGraph);
2251 flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerStrategy.GetFlatBufferBuilder();
2254 stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
2255 return !stream.bad();
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< LongData > CreateLongData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int64_t >> data=0)
float m_Eps
Used to avoid dividing by zero.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
bool m_HalfPixelCenters
Half Pixel Centers.
armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ReshapeDescriptor > CreateReshapeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> targetShape=0)
bool m_AlignCorners
Aligned corners.
flatbuffers::Offset< ReduceLayer > CreateReduceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReduceDescriptor > descriptor=0)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
flatbuffers::Offset< OutputSlot > CreateOutputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< armnnSerializer::TensorInfo > tensorInfo=0)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
flatbuffers::Offset< DepthwiseConvolution2dDescriptor > CreateDepthwiseConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void ExecuteStrategy(IStrategy &strategy) const
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
flatbuffers::Offset< AbsLayer > CreateAbsLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LstmLayer > CreateLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::LstmInputParams > inputParams=0)
const TensorShape & GetShape() const
flatbuffers::Offset< L2NormalizationLayer > CreateL2NormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::L2NormalizationDescriptor > descriptor=0)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< TransposeConvolution2dDescriptor > CreateTransposeConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
flatbuffers::Offset< ResizeDescriptor > CreateResizeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetHeight=0, uint32_t targetWidth=0, armnnSerializer::ResizeMethod method=armnnSerializer::ResizeMethod_NearestNeighbor, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
flatbuffers::Offset< FillLayer > CreateFillLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FillDescriptor > descriptor=0)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
flatbuffers::Offset< SoftmaxDescriptor > CreateSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=0.0f)
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
uint32_t m_TargetWidth
Target width value.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Dimensionality GetDimensionality() const
Function that returns the tensor type.
flatbuffers::Offset< GatherLayer > CreateGatherLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::GatherDescriptor > descriptor=0)
flatbuffers::Offset< RankLayer > CreateRankLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool HasPerAxisQuantization() const
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
flatbuffers::Offset< TransposeLayer > CreateTransposeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeDescriptor > descriptor=0)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
flatbuffers::Offset< ComparisonLayer > CreateComparisonLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ComparisonDescriptor > descriptor=0)
bool m_KeepDims
if true then output shape has no change.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Optional< unsigned int > GetQuantizationDim() const
flatbuffers::Offset< QuantizeLayer > CreateQuantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
flatbuffers::Offset< InputSlot > CreateInputSlot(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, const armnnSerializer::Connection *connection=0)
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
static ISerializer * CreateRaw()
flatbuffers::Offset< SpaceToDepthDescriptor > CreateSpaceToDepthDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< QuantizedLstmLayer > CreateQuantizedLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QuantizedLstmInputParams > inputParams=0)
flatbuffers::Offset< TransposeDescriptor > CreateTransposeDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
Main network class which provides the interface for building up a neural network. ...
flatbuffers::Offset< DetectionPostProcessDescriptor > CreateDetectionPostProcessDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t maxDetections=0, uint32_t maxClassesPerDetection=0, uint32_t detectionsPerClass=0, float nmsScoreThreshold=0.0f, float nmsIouThreshold=0.0f, uint32_t numClasses=0, bool useRegularNms=false, float scaleX=0.0f, float scaleY=0.0f, float scaleW=0.0f, float scaleH=0.0f)
armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorithmMethod(armnn::NormalizationAlgorithmMethod normalizationAlgorithmMethod)
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< AnyLayer > CreateAnyLayer(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::Layer layer_type=armnnSerializer::Layer_NONE, flatbuffers::Offset< void > layer=0)
flatbuffers::Offset< DepthwiseConvolution2dLayer > CreateDepthwiseConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthwiseConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
flatbuffers::Offset< MergeLayer > CreateMergeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
uint32_t GetNumViews() const
Get the number of views.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
flatbuffers::Offset< QLstmInputParams > CreateQLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
flatbuffers::Offset< FullyConnectedDescriptor > CreateFullyConnectedDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool biasEnabled=false, bool transposeWeightsMatrix=false, bool constantWeights=true)
flatbuffers::Offset< TransposeConvolution2dLayer > CreateTransposeConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::TransposeConvolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
flatbuffers::Offset< TensorInfo > CreateTensorInfo(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimensions=0, armnnSerializer::DataType dataType=armnnSerializer::DataType_Float16, float quantizationScale=1.0f, int32_t quantizationOffset=0, flatbuffers::Offset< flatbuffers::Vector< float >> quantizationScales=0, uint32_t quantizationDim=0, uint32_t dimensionality=1, flatbuffers::Offset< flatbuffers::Vector< uint8_t >> dimensionSpecificity=0)
flatbuffers::Offset< PreluLayer > CreatePreluLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
flatbuffers::Offset< StandInDescriptor > CreateStandInDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t numInputs=0, uint32_t numOutputs=0)
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
uint32_t m_DilationY
Dilation factor value for height dimension.
armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id) override
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
flatbuffers::Offset< MultiplicationLayer > CreateMultiplicationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< DepthToSpaceLayer > CreateDepthToSpaceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DepthToSpaceDescriptor > descriptor=0)
flatbuffers::Offset< InstanceNormalizationLayer > CreateInstanceNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::InstanceNormalizationDescriptor > descriptor=0)
armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOperation reduceOperation)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
flatbuffers::Offset< SliceLayer > CreateSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SliceDescriptor > descriptor=0)
armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
flatbuffers::Offset< Convolution2dDescriptor > CreateConvolution2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t strideX=0, uint32_t strideY=0, uint32_t dilationX=1, uint32_t dilationY=1, bool biasEnabled=false, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
flatbuffers::Offset< InputLayer > CreateInputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
Base class for all descriptors.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< ShortData > CreateShortData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int16_t >> data=0)
serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
TensorShape m_TargetShape
Target shape value.
bool SaveSerializedToStream(std::ostream &stream)
Serializes the SerializedGraph to the stream.
flatbuffers::Offset< ConcatLayer > CreateConcatLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > descriptor=0)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
flatbuffers::Offset< SubtractionLayer > CreateSubtractionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< BindableLayerBase > CreateBindableLayerBase(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, int32_t layerBindingId=0)
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at index idx.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ArgMinMaxLayer > CreateArgMinMaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ArgMinMaxDescriptor > descriptor=0)
armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< QLstmDescriptor > CreateQLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false, float cellClip=0.0f, float projectionClip=0.0f, float inputIntermediateScale=0.0f, float forgetIntermediateScale=0.0f, float cellIntermediateScale=0.0f, float outputIntermediateScale=0.0f, int32_t hiddenStateZeroPoint=0, float hiddenStateScale=0.0f)
bool m_LayerNormEnabled
Enable/disable layer normalization.
flatbuffers::Offset< GreaterLayer > CreateGreaterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
float m_NmsIouThreshold
Intersection over union threshold.
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(armnn::LogicalBinaryOperation logicalBinaryOperation)
flatbuffers::Offset< ArgMinMaxDescriptor > CreateArgMinMaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ArgMinMaxFunction argMinMaxFunction=armnnSerializer::ArgMinMaxFunction_Min, int32_t axis=0)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< AdditionLayer > CreateAdditionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
flatbuffers::Offset< L2NormalizationDescriptor > CreateL2NormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW, float eps=1e-12f)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
flatbuffers::Offset< MinimumLayer > CreateMinimumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
flatbuffers::Offset< ByteData > CreateByteData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int8_t >> data=0)
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
An output connection slot for a layer.
flatbuffers::Offset< DepthToSpaceDescriptor > CreateDepthToSpaceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t blockSize=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
An ArgMinMaxDescriptor for ArgMinMaxLayer.
float GetQuantizationScale() const
flatbuffers::Offset< LstmInputParams > CreateLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > projectionBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellLayerNormWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputLayerNormWeights=0)
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
float m_ProjectionClip
Clipping threshold value for the projection.
flatbuffers::Offset< CastLayer > CreateCastLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< LayerBase > CreateLayerBase(flatbuffers::FlatBufferBuilder &_fbb, uint32_t index=0, flatbuffers::Offset< flatbuffers::String > layerName=0, armnnSerializer::LayerType layerType=armnnSerializer::LayerType_Addition, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::InputSlot >>> inputSlots=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::OutputSlot >>> outputSlots=0)
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
flatbuffers::Offset< QuantizedLstmInputParams > CreateQuantizedLstmInputParams(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToInputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToForgetWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToCellWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > recurrentToOutputWeights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > inputGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > forgetGateBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > cellBias=0, flatbuffers::Offset< armnnSerializer::ConstTensor > outputGateBias=0)
flatbuffers::Offset< ReduceDescriptor > CreateReduceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, bool keepDims=false, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, armnnSerializer::ReduceOperation reduceOperation=armnnSerializer::ReduceOperation_Sum)
flatbuffers::Offset< StackDescriptor > CreateStackDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t axis=0, uint32_t numInputs=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> inputShape=0)
flatbuffers::Offset< BatchToSpaceNdDescriptor > CreateBatchToSpaceNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> crops=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
float m_InputIntermediateScale
Input intermediate quantization scale.
flatbuffers::Offset< PadDescriptor > CreatePadDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, float padValue=0.0f)
uint32_t m_TargetWidth
Target width value.
flatbuffers::Offset< SplitterLayer > CreateSplitterLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ViewsDescriptor > descriptor=0)
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
flatbuffers::Offset< OutputLayer > CreateOutputLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::BindableLayerBase > base=0)
void Serialize(const armnn::INetwork &inNetwork)
Serializes the network to ArmNN SerializedGraph.
flatbuffers::Offset< SoftmaxLayer > CreateSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SoftmaxDescriptor > descriptor=0)
flatbuffers::Offset< FillDescriptor > CreateFillDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float value=0.0f)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
flatbuffers::Offset< StridedSliceLayer > CreateStridedSliceLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StridedSliceDescriptor > descriptor=0)
virtual unsigned int CalculateIndexOnOwner() const =0
flatbuffers::Offset< LogSoftmaxDescriptor > CreateLogSoftmaxDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float beta=1.0f, int32_t axis=-1)
bool m_UseRegularNms
Use Regular NMS.
flatbuffers::Offset< RsqrtLayer > CreateRsqrtLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< MeanLayer > CreateMeanLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::MeanDescriptor > descriptor=0)
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
flatbuffers::Offset< ActivationLayer > CreateActivationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ActivationDescriptor > descriptor=0)
flatbuffers::Offset< SpaceToDepthLayer > CreateSpaceToDepthLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToDepthDescriptor > descriptor=0)
flatbuffers::Offset< SliceDescriptor > CreateSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> size=0)
const TensorInfo & GetInfo() const
min(a, max(b, input)); used by ReLu1 and ReLu6.
flatbuffers::Offset< BatchNormalizationLayer > CreateBatchNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchNormalizationDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > mean=0, flatbuffers::Offset< armnnSerializer::ConstTensor > variance=0, flatbuffers::Offset< armnnSerializer::ConstTensor > beta=0, flatbuffers::Offset< armnnSerializer::ConstTensor > gamma=0)
flatbuffers::Offset< BatchNormalizationDescriptor > CreateBatchNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
uint32_t m_TargetHeight
Target height value.
uint32_t m_NumInputs
Number of input tensors.
flatbuffers::Offset< GatherDescriptor > CreateGatherDescriptor(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis=0)
flatbuffers::Offset< ActivationDescriptor > CreateActivationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ActivationFunction activationFunction=armnnSerializer::ActivationFunction_Sigmoid, float a=0.0f, float b=0.0f)
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
flatbuffers::Offset< NormalizationLayer > CreateNormalizationLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::NormalizationDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< ViewsDescriptor > CreateViewsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::OriginsDescriptor > origins=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewSizes=0)
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
flatbuffers::Offset< PermuteDescriptor > CreatePermuteDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> dimMappings=0)
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
flatbuffers::Offset< MeanDescriptor > CreateMeanDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> axis=0, bool keepDims=false)
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at index idx.
flatbuffers::Offset< StandInLayer > CreateStandInLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StandInDescriptor > descriptor=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
std::vector< int > m_End
End values for the input that will be sliced.
flatbuffers::Offset< SwitchLayer > CreateSwitchLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
flatbuffers::Offset< ResizeBilinearDescriptor > CreateResizeBilinearDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t targetWidth=0, uint32_t targetHeight=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC, bool alignCorners=false, bool halfPixelCenters=false)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at index idx.
float m_CellClip
Clipping threshold value for the cell state.
flatbuffers::Offset< ElementwiseUnaryDescriptor > CreateElementwiseUnaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::UnaryOperation operation=armnnSerializer::UnaryOperation_Abs)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
flatbuffers::Offset< PadLayer > CreatePadLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PadDescriptor > descriptor=0)
flatbuffers::Offset< FloorLayer > CreateFloorLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
std::unique_ptr< ISerializer, void(*)(ISerializer *serializer)> ISerializerPtr
flatbuffers::Offset< NormalizationDescriptor > CreateNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::NormalizationAlgorithmChannel normChannelType=armnnSerializer::NormalizationAlgorithmChannel_Across, armnnSerializer::NormalizationAlgorithmMethod normMethodType=armnnSerializer::NormalizationAlgorithmMethod_LocalBrightness, uint32_t normSize=0, float alpha=0.0f, float beta=0.0f, float k=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NCHW)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::ComparisonOperation comparisonOperation)
flatbuffers::Offset< Pooling2dDescriptor > CreatePooling2dDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::PoolingAlgorithm poolType=armnnSerializer::PoolingAlgorithm_Max, uint32_t padLeft=0, uint32_t padRight=0, uint32_t padTop=0, uint32_t padBottom=0, uint32_t poolWidth=0, uint32_t poolHeight=0, uint32_t strideX=0, uint32_t strideY=0, armnnSerializer::OutputShapeRounding outputShapeRounding=armnnSerializer::OutputShapeRounding_Floor, armnnSerializer::PaddingMethod paddingMethod=armnnSerializer::PaddingMethod_IgnoreValue, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< EqualLayer > CreateEqualLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
flatbuffers::Offset< UintVector > CreateUintVector(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> data=0)
flatbuffers::Offset< StackLayer > CreateStackLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::StackDescriptor > descriptor=0)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
flatbuffers::Offset< Convolution2dLayer > CreateConvolution2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Convolution2dDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
flatbuffers::Offset< Pooling2dLayer > CreatePooling2dLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::Pooling2dDescriptor > descriptor=0)
bool m_ProjectionEnabled
Enable/disable the projection layer.
flatbuffers::Offset< SpaceToBatchNdLayer > CreateSpaceToBatchNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::SpaceToBatchNdDescriptor > descriptor=0)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method)
uint32_t m_NumInputs
Number of input tensors.
uint32_t GetNumDimensions() const
Get the number of dimensions.
flatbuffers::Offset< ComparisonDescriptor > CreateComparisonDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::ComparisonOperation operation=armnnSerializer::ComparisonOperation_Equal)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
flatbuffers::Offset< MaximumLayer > CreateMaximumLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
static ISerializerPtr Create()
armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
uint32_t m_PadRight
Padding right value in the width dimension.
flatbuffers::Offset< InstanceNormalizationDescriptor > CreateInstanceNormalizationDescriptor(flatbuffers::FlatBufferBuilder &_fbb, float gamma=0.0f, float beta=0.0f, float eps=0.0f, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
int m_Axis
Axis to reduce across the input tensor.
flatbuffers::Offset< IntData > CreateIntData(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> data=0)
virtual const char * GetName() const =0
Returns the name of the layer.
float m_ScaleY
Center size encoding scale y.
flatbuffers::Offset< ResizeLayer > CreateResizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeDescriptor > descriptor=0)
uint32_t GetNumViews() const
Get the number of views.
flatbuffers::Offset< FullyConnectedLayer > CreateFullyConnectedLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::FullyConnectedDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > weights=0, flatbuffers::Offset< armnnSerializer::ConstTensor > biases=0)
float m_NmsScoreThreshold
NMS score threshold.
flatbuffers::Offset< DequantizeLayer > CreateDequantizeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
flatbuffers::Offset< ResizeBilinearLayer > CreateResizeBilinearLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ResizeBilinearDescriptor > descriptor=0)
virtual LayerGuid GetOwningLayerGuid() const =0
A Pooling2dDescriptor for the Pooling2dLayer.
flatbuffers::Offset< DetectionPostProcessLayer > CreateDetectionPostProcessLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::DetectionPostProcessDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::ConstTensor > anchors=0)
A NormalizationDescriptor for the NormalizationLayer.
flatbuffers::Offset< FeatureCompatibilityVersions > CreateFeatureCompatibilityVersions(flatbuffers::FlatBufferBuilder &_fbb, uint32_t bindingIdsScheme=0)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
flatbuffers::Offset< BatchToSpaceNdLayer > CreateBatchToSpaceNdLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::BatchToSpaceNdDescriptor > descriptor=0)
flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > GetVersionTable()
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
flatbuffers::Offset< LogSoftmaxLayer > CreateLogSoftmaxLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogSoftmaxDescriptor > descriptor=0)
float m_CellIntermediateScale
Cell intermediate quantization scale.
flatbuffers::Offset< StridedSliceDescriptor > CreateStridedSliceDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< int32_t >> begin=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> end=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> stride=0, int32_t beginMask=0, int32_t endMask=0, int32_t shrinkAxisMask=0, int32_t ellipsisMask=0, int32_t newAxisMask=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
flatbuffers::Offset< OriginsDescriptor > CreateOriginsDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t concatAxis=0, uint32_t numViews=0, uint32_t numDimensions=0, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::UintVector >>> viewOrigins=0)
flatbuffers::Offset< QLstmLayer > CreateQLstmLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::QLstmDescriptor > descriptor=0, flatbuffers::Offset< armnnSerializer::QLstmInputParams > inputParams=0)
flatbuffers::Offset< LstmDescriptor > CreateLstmDescriptor(flatbuffers::FlatBufferBuilder &_fbb, uint32_t activationFunc=0, float clippingThresCell=0.0f, float clippingThresProj=0.0f, bool cifgEnabled=true, bool peepholeEnabled=false, bool projectionEnabled=false, bool layerNormEnabled=false)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
static void Destroy(ISerializer *serializer)
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
flatbuffers::Offset< ElementwiseUnaryLayer > CreateElementwiseUnaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ElementwiseUnaryDescriptor > descriptor=0)
flatbuffers::Offset< PermuteLayer > CreatePermuteLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::PermuteDescriptor > descriptor=0)
const OriginsDescriptor & GetOrigins() const
Get the View Origins.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts, e.g. NCHW vs. NHWC.
uint32_t m_NormSize
Depth radius value.
flatbuffers::Offset< SpaceToBatchNdDescriptor > CreateSpaceToBatchNdDescriptor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> blockShape=0, flatbuffers::Offset< flatbuffers::Vector< uint32_t >> padList=0, armnnSerializer::DataLayout dataLayout=armnnSerializer::DataLayout_NHWC)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
flatbuffers::Offset< SerializedGraph > CreateSerializedGraph(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< flatbuffers::Vector< flatbuffers::Offset< armnnSerializer::AnyLayer >>> layers=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> inputIds=0, flatbuffers::Offset< flatbuffers::Vector< int32_t >> outputIds=0, flatbuffers::Offset< armnnSerializer::FeatureCompatibilityVersions > featureVersions=0)
armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
flatbuffers::Offset< LogicalBinaryDescriptor > CreateLogicalBinaryDescriptor(flatbuffers::FlatBufferBuilder &_fbb, armnnSerializer::LogicalBinaryOperation operation=armnnSerializer::LogicalBinaryOperation_LogicalAnd)
flatbuffers::Offset< DivisionLayer > CreateDivisionLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0)
A PermuteDescriptor for the PermuteLayer.
flatbuffers::Offset< LogicalBinaryLayer > CreateLogicalBinaryLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::LogicalBinaryDescriptor > descriptor=0)
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })