43 return new Network(networkOptions);
53 delete PolymorphicDowncast<Network*>(network);
58 delete PolymorphicDowncast<OptimizedNetwork*>(network);
69 return m_Graph->SerializeToDot(stream);
73 Optional<std::vector<std::string>&> errorMessages)
75 std::stringstream fullErrorMessage;
76 fullErrorMessage <<
"ERROR: " << errorMessage;
80 errorMessages.value().push_back(fullErrorMessage.str());
85 Optional<std::vector<std::string>&> warningMessages)
87 std::stringstream fullWarningMessage;
88 fullWarningMessage <<
"WARNING: " << warningMessage;
92 warningMessages.value().push_back(fullWarningMessage.str());
99 Optional<std::vector<std::string>&> errMessages)
101 std::stringstream failureMsg;
113 bool noErrors =
true;
115 for (
unsigned int i = 0; i < numOutputs; i++) {
121 std::stringstream ss;
123 <<
" (" << layer->
GetNameStr() <<
") is of type" 124 <<
" Quantized 8 bit but its scale parameter has not been set";
132 std::stringstream ss;
133 ss <<
"Quantization parameters for Softmax layer (Scale: " <<
135 ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
146 template <
typename LayerT>
149 LayerT* layer = PolymorphicDowncast<LayerT*>(l);
160 layer->m_Weight->template GetTensor<armnn::BFloat16>(), info.
GetNumElements(), newValues.data());
176 const std::vector<BackendId>& availablePreferredBackends,
177 std::string& reasonIfUnsupported,
178 Optional<std::vector<std::string>&> errMessages)
183 auto ReturnError = [&](
const Layer* layer)
200 std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
203 convertFp16ToFp32Layers =
208 std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
211 convertFp32ToFp16Layers =
216 auto AssignFirstSupportedBackend = [&](
Layer* layer,
BackendId preferredBackend)
218 bool supportedBackendFound =
false;
219 std::string reasonIfUnsupported;
225 reasonIfUnsupported))
227 supportedBackendFound =
true;
231 for (
const auto& backend : availablePreferredBackends)
234 if (backend == preferredBackend)
242 reasonIfUnsupported))
244 supportedBackendFound =
true;
250 return supportedBackendFound;
255 if (!AssignFirstSupportedBackend(convertLayer, backend))
257 return ReturnError(convertLayer);
263 if (!AssignFirstSupportedBackend(convertLayer, backend))
265 return ReturnError(convertLayer);
279 std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
282 convertBf16ToFp32Layers =
286 ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
290 ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
295 std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
298 convertFp32ToBf16Layers =
303 auto AssignFirstSupportedBackend = [&](
Layer* layer,
BackendId preferredBackend)
305 bool supportedBackendFound =
false;
306 std::string reasonIfUnsupported;
312 reasonIfUnsupported))
314 supportedBackendFound =
true;
318 for (
const auto& backend : availablePreferredBackends)
321 if (backend == preferredBackend)
329 reasonIfUnsupported))
331 supportedBackendFound =
true;
337 return supportedBackendFound;
342 if (!AssignFirstSupportedBackend(convertLayer, backend))
344 return ReturnError(convertLayer);
350 if (!AssignFirstSupportedBackend(convertLayer, backend))
352 return ReturnError(convertLayer);
360 std::stringstream warningMsg;
362 <<
" is not supported on requested backend " << layer->
GetBackendId().
Get()
365 <<
" (reason: " << reasonIfUnsupported
366 <<
"), falling back to the next backend.";
382 Optional<std::vector<std::string>&> errMessages)
387 auto ReturnError = [&](
const Layer* layer)
394 if (availablePreferredBackends.empty())
396 std::stringstream failureMsg;
397 failureMsg <<
"No preferred backends are available";
404 for (
auto it = firstLayer; it != lastLayer; ++it)
409 layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
411 layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
413 std::string reasonIfUnsupported;
423 if (layer->GetBackendHint().has_value() &&
428 layer->GetBackendHint().value(),
431 availablePreferredBackends,
441 for (
const auto& backend : availablePreferredBackends)
443 if (layer->GetBackendHint().has_value() &&
444 layer->GetBackendHint().value() == backend)
455 availablePreferredBackends,
491 layer->SetBackendId(cpuBackendId);
496 return ReturnError(layer);
507 Optional<std::vector<std::string>&> errMessages)
525 auto backendFactory = backendRegistry.GetFactory(selectedBackend);
526 auto backendObjPtr = backendFactory();
529 backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
531 backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
541 Optional<std::vector<std::string>&> errMessages)
553 auto backendObjPtr = backends.find(selectedBackend)->second.get();
560 [&backendObjPtr](
const Layer& layer)
566 if (subgraphs.empty())
573 for (
auto& subgraph : subgraphs)
576 OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
583 SubgraphView& replacementSubgraph = substitution.m_ReplacementSubgraph;
584 SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
588 std::for_each(replacementSubgraph.
begin(), replacementSubgraph.
end(), [&selectedBackend](
Layer* l)
591 l->SetBackendId(selectedBackend);
597 std::stringstream warningMsg;
598 warningMsg <<
"Some sub-graph(s) failed to optimized on " << backendObjPtr->GetId() <<
" backend.";
603 if (!backendObjPtr->GetId().IsCpuRef())
606 settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
613 std::stringstream subgraphMsg;
614 subgraphMsg <<
"Re-assigning backends to " << failedSubgraph.GetLayers().size()
615 <<
" layers inside sub-graph " << count++;
622 if (reassignmentResult.m_Error)
645 if (srcFactory && dstFactory &&
670 if (frmBackend == backends.end() ||
671 !frmBackend->second->SupportsTensorAllocatorAPI())
678 std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
684 const Layer& connectedLayer = connection->GetOwningLayer();
686 auto toBackend = backends.find(connectedLayer.
GetBackendId());
687 ARMNN_ASSERT_MSG(toBackend != backends.end(),
"Backend id not found for the connected layer");
689 if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
695 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
696 for (
auto&& dst : dstPrefs)
709 auto it = factoryScores.find(dst);
710 if (it == factoryScores.end())
713 factoryScores[dst] = 0;
722 factoryScores[dst]++;
725 if (factoryScores[dst] > topScore)
727 topScore = factoryScores[dst];
755 if (frmBackend == backends.end() ||
756 !frmBackend->second->SupportsTensorAllocatorAPI())
762 bool requiresMapUnmap =
false;
765 const Layer& connectedLayer = connection->GetOwningLayer();
768 requiresMapUnmap =
true;
776 std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
777 for (
auto&& pref : srcPrefs)
779 if (requiresMapUnmap)
789 auto it = factoryScores.find(pref);
790 if (it == factoryScores.end())
793 factoryScores[pref] = 0;
800 const Layer& connectedLayer = connection->GetOwningLayer();
802 auto toBackend = backends.find(connectedLayer.
GetBackendId());
803 ARMNN_ASSERT_MSG(toBackend != backends.end(),
"Backend id not found for the connected layer");
805 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
806 for (
auto&& src : srcPrefs)
808 if (factoryScores.find(src) == factoryScores.end())
813 for (
auto&& dst : dstPrefs)
818 factoryScores[src]++;
826 int minScore = std::numeric_limits<int>::max();
827 for (
auto it : factoryScores)
829 minScore = std::min(minScore, it.second);
833 std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
834 for (
auto it : factoryScores)
836 if (it.second == minScore)
838 optimalFactories.push_back(it.first);
843 for (
auto&& srcPref : srcPrefs)
845 for (
auto&& comp : optimalFactories)
860 const Layer& connectedLayer,
864 auto toBackend = backends.find(connectedLayer.
GetBackendId());
865 ARMNN_ASSERT_MSG(toBackend != backends.end(),
"Backend id not found for the connected layer");
867 auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
890 for (
auto&& pref : dstPrefs)
892 if (pref == srcFactoryId)
902 for (
auto&& pref : dstPrefs)
918 if (srcCapability.empty() && dstCapability.empty())
929 for (
auto&& pref : dstPrefs)
947 Optional<std::vector<std::string>&> errMessages)
951 optGraph.
ForEachLayer([&backends, ®istry, &result, &errMessages, importEnabled](
Layer* layer)
982 unsigned int connectionIdx = 0;
985 const Layer& connectedLayer = connection->GetOwningLayer();
988 registry, importEnabled);
995 errMessages.value().emplace_back(
"Could not find valid strategy required for compatibility" 996 " between backends.");
1012 const std::vector<BackendId>& backendPreferences,
1015 Optional<std::vector<std::string>&> messages)
1017 if (backendPreferences.empty())
1027 const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
1028 std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.
GetGraph());
1033 OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
1039 using namespace optimizations;
1043 optGraph.InferTensorInfos();
1083 std::stringstream failureMsg;
1084 failureMsg <<
"None of the preferred backends " << backendPreferences
1102 if (assignBackendsResult.
m_Error)
1117 if (backendOptimizationResult.
m_Error)
1133 tensorHandleFactoryRegistry,
1143 optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
1153 auto backendPtr = factoryFun();
1157 auto backendSpecificOptimizations = backendPtr->GetOptimizations();
1160 if (!backendSpecificOptimizations.empty())
1168 bool Network::GetShapeInferenceMethod()
1170 if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() ==
"ShapeInferenceMethod")
1172 return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
1178 : m_NetworkOptions(networkOptions),
1179 m_Graph(
std::make_unique<
Graph>(GetShapeInferenceMethod()))
1194 return m_Graph->AddLayer<
InputLayer>(id, name);
1206 return m_Graph->AddLayer<
ComparisonLayer>(comparisonDescriptor, name);
1218 return m_Graph->AddLayer<
FillLayer>(fillDescriptor, name);
1231 const auto layer = m_Graph->AddLayer<
FullyConnectedLayer>(fullyConnectedDescriptor, name);
1233 layer->
m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1235 if (fullyConnectedDescriptor.m_BiasEnabled)
1237 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.
value());
1248 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
1256 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
1265 return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
1271 return m_Graph->AddLayer<
ConcatLayer>(concatDescriptor, name);
1284 const auto layer = m_Graph->AddLayer<
Convolution2dLayer>(convolution2dDescriptor, name);
1286 layer->
m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1288 if (convolution2dDescriptor.m_BiasEnabled)
1290 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.
value());
1301 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1309 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1318 return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
1334 layer->
m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1336 if (convolution2dDescriptor.m_BiasEnabled)
1338 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.
value());
1356 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1365 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
1375 return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
1391 return m_Graph->AddLayer<
PermuteLayer>(permuteDescriptor, name);
1397 return m_Graph->AddLayer<
Pooling2dLayer>(pooling2dDescriptor, name);
1403 return m_Graph->AddLayer<
ActivationLayer>(activationDescriptor, name);
1409 return m_Graph->AddLayer<
ArgMinMaxLayer>(argMinMaxDescriptor, name);
1413 normalizationDescriptor,
1421 return m_Graph->AddLayer<
SliceLayer>(sliceDescriptor, name);
1427 return m_Graph->AddLayer<
SoftmaxLayer>(softmaxDescriptor, name);
1433 return m_Graph->AddLayer<
SplitterLayer>(splitterDescriptor, name);
1481 layer->
m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
1482 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
1483 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
1484 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
1491 return m_Graph->AddLayer<
RankLayer>(name);
1505 return m_Graph->AddLayer<
ResizeLayer>(resizeDescriptor, name);
1509 resizeDescriptor,
const char* name)
1511 return m_Graph->AddLayer<
ResizeLayer>(resizeDescriptor, name);
1536 layer->
m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);
1544 return m_Graph->AddLayer<
ReshapeLayer>(reshapeDescriptor, name);
1568 const auto layer = m_Graph->AddLayer<
LstmLayer>(descriptor, name);
1573 layer->m_BasicParameters.m_InputToCellWeights =
1575 layer->m_BasicParameters.m_InputToOutputWeights =
1577 layer->m_BasicParameters.m_RecurrentToForgetWeights =
1579 layer->m_BasicParameters.m_RecurrentToCellWeights =
1581 layer->m_BasicParameters.m_RecurrentToOutputWeights =
1583 layer->m_BasicParameters.m_ForgetGateBias =
1585 layer->m_BasicParameters.m_CellBias =
1586 std::make_unique<ScopedCpuTensorHandle>(*(params.
m_CellBias));
1587 layer->m_BasicParameters.m_OutputGateBias =
1591 if(!descriptor.m_CifgEnabled)
1596 "when CIFG is disabled.");
1601 "AddLstmLayer: Recurrent To Input Weights cannot be NULL " 1602 "when CIFG is disabled.");
1607 "when CIFG is disabled.");
1609 layer->m_CifgParameters.m_InputToInputWeights =
1611 layer->m_CifgParameters.m_RecurrentToInputWeights =
1613 layer->m_CifgParameters.m_InputGateBias =
1618 if(descriptor.m_ProjectionEnabled)
1623 "when projection is enabled.");
1625 layer->m_ProjectionParameters.m_ProjectionWeights =
1629 layer->m_ProjectionParameters.m_ProjectionBias =
1635 if(descriptor.m_PeepholeEnabled)
1637 if(!descriptor.m_CifgEnabled)
1642 "when Peephole is enabled and CIFG disabled.");
1645 layer->m_PeepholeParameters.m_CellToInputWeights =
1652 "when Peephole is enabled.");
1657 "when Peephole is enabled.");
1660 layer->m_PeepholeParameters.m_CellToForgetWeights =
1662 layer->m_PeepholeParameters.m_CellToOutputWeights =
1667 if(descriptor.m_LayerNormEnabled)
1669 if(!descriptor.m_CifgEnabled)
1674 "when layer normalization is enabled and CIFG disabled.");
1676 layer->m_LayerNormParameters.m_InputLayerNormWeights =
1683 "when layer normalization is enabled.");
1688 "when layer normalization is enabled.");
1693 "when layer normalization is enabled.");
1695 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
1697 layer->m_LayerNormParameters.m_CellLayerNormWeights =
1699 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
1717 return m_Graph->AddLayer<
MeanLayer>(meanDescriptor,name);
1722 return m_Graph->AddLayer<
PadLayer>(padDescriptor,name);
1765 return m_Graph->AddLayer<
GatherLayer>(gatherDescriptor, name);
1795 layer->
m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
1797 if (descriptor.m_BiasEnabled)
1799 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(biases.
value());
1808 return m_Graph->AddLayer<
TransposeLayer>(transposeDescriptor, name);
1814 return m_Graph->AddLayer<
StackLayer>(stackDescriptor, name);
1832 layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
1834 layer->m_QuantizedLstmParameters.m_InputToCellWeights =
1836 layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
1840 layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
1842 layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
1844 layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
1846 layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
1850 layer->m_QuantizedLstmParameters.m_InputGateBias =
1852 layer->m_QuantizedLstmParameters.m_ForgetGateBias =
1854 layer->m_QuantizedLstmParameters.m_CellBias =
1855 std::make_unique<ScopedCpuTensorHandle>(params.
GetCellBias());
1856 layer->m_QuantizedLstmParameters.m_OutputGateBias =
1866 const auto layer = m_Graph->AddLayer<
QLstmLayer>(descriptor, name);
1871 layer->m_BasicParameters.m_InputToCellWeights =
1873 layer->m_BasicParameters.m_InputToOutputWeights =
1875 layer->m_BasicParameters.m_RecurrentToForgetWeights =
1877 layer->m_BasicParameters.m_RecurrentToCellWeights =
1879 layer->m_BasicParameters.m_RecurrentToOutputWeights =
1881 layer->m_BasicParameters.m_ForgetGateBias =
1883 layer->m_BasicParameters.m_CellBias =
1884 std::make_unique<ScopedCpuTensorHandle>(*(params.
m_CellBias));
1885 layer->m_BasicParameters.m_OutputGateBias =
1889 if(!descriptor.m_CifgEnabled)
1899 "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
1907 layer->m_CifgParameters.m_InputToInputWeights =
1909 layer->m_CifgParameters.m_RecurrentToInputWeights =
1911 layer->m_CifgParameters.m_InputGateBias =
1916 if(descriptor.m_ProjectionEnabled)
1923 layer->m_ProjectionParameters.m_ProjectionWeights =
1929 layer->m_ProjectionParameters.m_ProjectionBias =
1936 if(descriptor.m_PeepholeEnabled)
1948 if(!descriptor.m_CifgEnabled)
1955 layer->m_PeepholeParameters.m_CellToInputWeights =
1959 layer->m_PeepholeParameters.m_CellToForgetWeights =
1961 layer->m_PeepholeParameters.m_CellToOutputWeights =
1966 if(descriptor.m_LayerNormEnabled)
1983 if(!descriptor.m_CifgEnabled)
1990 layer->m_LayerNormParameters.m_InputLayerNormWeights =
1994 layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
1996 layer->m_LayerNormParameters.m_CellLayerNormWeights =
1998 layer->m_LayerNormParameters.m_OutputLayerNormWeights =
2014 layer->Accept(visitor);
2024 : m_Graph(
std::move(graph)), m_Guid(profiling::
ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
A layer that the constant data can be bound to.
OptimizeForConnection< Layer, PermuteLayer, SquashEqualSiblingsImpl< PermuteLayer > > SquashEqualPermuteSiblings
void ReportError(const std::string &errorMessage, Optional< std::vector< std::string > &> errorMessages)
bool m_BiasEnabled
Enable/disable bias.
ModelOptions m_ModelOptions
bool m_HalfPixelCenters
Half Pixel Centers.
IConnectableLayer * AddPooling2dLayer(const Pooling2dDescriptor &pooling2dDescriptor, const char *name=nullptr) override
Adds a pooling layer to the network.
bool m_AlignCorners
Aligned corners.
This layer represents a minimum operation.
static const FactoryId DeferredFactoryId
Use the workload factory to create the tensor handle.
This layer represents a split operation.
LstmBasicParameters m_BasicParameters
const std::vector< InputSlot * > & GetConnections() const
FactoryFunction GetFactory(const BackendId &id) const
This layer represents a batch normalization operation.
static void Destroy(INetwork *network)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
OptimizeForConnection< PermuteLayer, PermuteLayer, OptimizeInversePermutesImpl< PermuteLayer > > OptimizeInversePermutes
virtual bool SupportsMapUnmap() const final
std::vector< ConvertFp32ToFp16Layer * > InsertConvertFp32ToFp16LayersAfter(Graph &graph, Layer &layer)
bool m_BiasEnabled
Enable/disable bias.
IConnectableLayer * AddOutputLayer(LayerBindingId id, const char *name=nullptr) override
Adds an output layer to the network.
IConnectableLayer * AddRankLayer(const char *name=nullptr) override
Adds a rank layer to the network.
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
QuantizedLstmParameters m_QuantizedLstmParameters
This layer represents a 2D transpose convolution operation.
No strategy has been defined. Used internally to verify integrity of optimizations.
std::vector< ConvertFp16ToFp32Layer * > InsertConvertFp16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
IConnectableLayer * AddL2NormalizationLayer(const L2NormalizationDescriptor &desc, const char *name=nullptr) override
Adds an L2 normalization layer to the network.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
CPU Execution: Reference C++ kernels.
Optimizer::Optimizations MakeOptimizations(Args &&... args)
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
IConnectableLayer * AddResizeBilinearLayer(const ResizeBilinearDescriptor &resizeDesc, const char *name=nullptr) override
Adds a resize bilinear layer to the network.
IConnectableLayer * AddFillLayer(const FillDescriptor &fillDescriptor, const char *name=nullptr) override
Add an Fill layer to the network.
IConnectableLayer * AddMeanLayer(const MeanDescriptor &meanDescriptor, const char *name=nullptr) override
Add a Mean layer to the network.
A ReshapeDescriptor for the ReshapeLayer.
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap &backends, OutputSlot &outputSlot, TensorHandleFactoryRegistry ®istry)
OptimizeForConnection< TransposeLayer, TransposeLayer, OptimizeInversePermutesImpl< TransposeLayer > > OptimizeInverseTransposes
OptimizeForConnection< TransposeLayer, BatchToSpaceNdLayer, PermuteAndBatchToSpaceAsDepthToSpaceImpl< TransposeLayer > > TransposeAndBatchToSpaceAsDepthToSpace
OptimizeForExclusiveConnection< DepthwiseConvolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< DepthwiseConvolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoDepthwiseConvolution2DFloat32
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap &backends, OutputSlot &slot, TensorHandleFactoryRegistry ®istry)
A ComparisonDescriptor for the ComparisonLayer.
This layer represents a depthwise convolution 2d operation.
static void ConvertBFloat16ToFloat32(const void *srcBFloat16Buffer, size_t numElements, float *dstFloat32Buffer)
bool RequiresCopy(ITensorHandleFactory::FactoryId src, ITensorHandleFactory::FactoryId dst, TensorHandleFactoryRegistry ®istry)
uint32_t m_TargetWidth
Target width value.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< BackendOptions > ModelOptions
IConnectableLayer * AddLogSoftmaxLayer(const LogSoftmaxDescriptor &logSoftmaxDescriptor, const char *name=nullptr) override
Adds a log softmax layer to the network.
A Convolution2dDescriptor for the Convolution2dLayer.
Layer & GetOwningLayer() const
Source backends tensor data can be exported to destination backend tensor without copy...
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr) override
Adds a 2D depthwise convolution layer to the network.
This layer converts data type Float 16 to Float 32.
bool m_BiasEnabled
Enable/disable bias.
const Graph & GetGraph() const
IConnectableLayer * AddSwitchLayer(const char *name=nullptr) override
Adds a switch layer to the network.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
IConnectableLayer * AddFloorLayer(const char *name=nullptr) override
Adds a floor layer to the network.
static void Pass(Graph &graph, const Optimizations &optimizations)
OptimizeForExclusiveConnection< DepthwiseConvolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< DepthwiseConvolution2dLayer, armnn::DataType::Float16 > > FuseBatchNormIntoDepthwiseConvolution2DFloat16
This layer represents a SpaceToDepth operation.
IConnectableLayer * AddInputLayer(LayerBindingId id, const char *name=nullptr) override
Adds an input layer to the network.
This layer represents a reshape operation.
std::unique_ptr< ScopedCpuTensorHandle > m_InputToInputWeights
A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8)...
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float16 > > FuseBatchNormIntoConvolution2DFloat16
OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > > FuseBatchNormIntoConvolution2DFloat32
#define ARMNN_LOG(severity)
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8)...
Main network class which provides the interface for building up a neural network. ...
This layer represents an activation operation with the specified activation function.
BackendRegistry & BackendRegistryInstance()
This layer converts data type BFloat16 to Float32.
LayerT * ConvertBf16ToFp32Weight(Layer *l)
std::vector< BackendOptions > NetworkOptions
void Accept(ILayerVisitor &visitor) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
IConnectableLayer * AddMinimumLayer(const char *name=nullptr) override
Add a Minimum layer to the network.
This layer represents an unknown operation in the input graph.
OptimizeForConnection< Layer, ReshapeLayer, SquashEqualSiblingsImpl< ReshapeLayer > > SquashEqualReshapeSiblings
This layer represents a detection postprocess operator.
BackendIdSet m_SupportedBackends
OptimizeForConnection< Layer, TransposeLayer, MoveTransposeUpImpl > MoveTransposeUp
OptimizationResult ReturnWithError(OptimizationResult res, const Layer *layer, const BackendSettings &backendSettings, Optional< std::vector< std::string > &> errMessages)
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store weight values.
Copyright (c) 2020 ARM Limited.
IConnectableLayer * AddQLstmLayer(const QLstmDescriptor &descriptor, const LstmInputParams ¶ms, const char *name=nullptr) override
Add a QLstm layer to the network.
This layer represents a pad operation.
This layer represents a LSTM operation.
void IgnoreUnused(Ts &&...)
OptimizeForConnection< PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl > FoldPadIntoConvolution2d
void SetBackendId(const BackendId &id)
bool IsBackendSupported(const BackendId &backend) const
LayerList::const_iterator Iterator
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr) override
Adds a fully connected layer to the network.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
This layer represents a permutation operation.
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
This layer represents a SpaceToBatchNd operation.
IConnectableLayer * AddRsqrtLayer(const char *name=nullptr) override
Add Reciprocal of square root layer to the network.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
OptimizeForType< Layer, AddDebugImpl > InsertDebugLayer
OptimizeForConnection< ReshapeLayer, ReshapeLayer, OptimizeConsecutiveReshapesImpl > OptimizeConsecutiveReshapes
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
This layer represents a elementwiseUnary operation.
constexpr const char * GetDataTypeName(DataType dataType)
A ResizeDescriptor for the ResizeLayer.
IConnectableLayer * AddEqualLayer(const char *name=nullptr) override
Add a Equal layer to the network.
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr) override
Adds a 2D convolution layer to the network.
A StackDescriptor for the StackLayer.
IConnectableLayer * AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor &descriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr) override
Adds a 2D transpose convolution layer to the network.
Destination backend can work directly with tensors on source backend.
virtual std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences() const
(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.
OptimizeForConnection< ConvertFp16ToFp32Layer, ConvertFp32ToFp16Layer, OptimizeInverseConversionsImpl > OptimizeInverseConversionsFp16
The SubgraphView class represents a subgraph of a Graph.
A PadDescriptor for the PadLayer.
This layer represents an instance normalization operation.
OptimizeForConnection< PermuteLayer, BatchToSpaceNdLayer, PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteLayer > > PermuteAndBatchToSpaceAsDepthToSpace
OptimizeForConnection< Layer, PermuteLayer, MovePermuteUpImpl > MovePermuteUp
This layer represents a Logical Binary operation.
std::unique_ptr< ScopedCpuTensorHandle > m_LayerOutput
IConnectableLayer * AddReshapeLayer(const ReshapeDescriptor &reshapeDescriptor, const char *name=nullptr) override
Adds a reshape layer to the network.
IConnectableLayer * AddTransposeLayer(const TransposeDescriptor &transposeDescriptor, const char *name=nullptr) override
Adds a transpose layer to the network.
A layer user-provided data can be bound to (e.g. inputs, outputs).
void ForEachLayer(Func func) const
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
Status PrintGraph() override
IConnectableLayer * AddMergeLayer(const char *name=nullptr) override
Adds a merge layer to the network.
This layer dequantizes the input tensor.
ConvertConstants< Float32ToFloat16, IsFloat16Layer > ConvertConstantsFloatToHalf
OptimizeForType< TransposeLayer, TransposeAsReshapeImpl > TransposeAsReshape
IConnectableLayer * AddMaximumLayer(const char *name=nullptr) override
Add a Maximum layer to the network.
This layer represents a Gather operator.
std::unique_ptr< ScopedCpuTensorHandle > m_Anchors
A unique pointer to store Anchor values.
This layer represents a fully connected operation.
An LstmDescriptor for the LstmLayer.
#define ARMNN_NO_DEPRECATE_WARN_END
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
#define ARMNN_ASSERT_MSG(COND, MSG)
This layer represents a QuantizedLstm operation.
char const * GetLayerTypeAsCString(LayerType type)
This layer represents a log softmax operation.
std::unique_ptr< ScopedCpuTensorHandle > m_Mean
A unique pointer to store Mean values.
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
An ArgMinMaxDescriptor for ArgMinMaxLayer.
float GetQuantizationScale() const
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
bool has_value() const noexcept
A FullyConnectedDescriptor for the FullyConnectedLayer.
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
bool m_BiasEnabled
Enable/disable bias.
This layer represents a stack operation.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
IConnectableLayer * AddConcatLayer(const ConcatDescriptor &concatDescriptor, const char *name=nullptr) override
Adds a concatenation layer to the network.
const Subgraphs & GetFailedSubgraphs() const
This layer represents a merge operation.
This layer represents a softmax operation.
const std::string & GetNameStr() const
uint32_t m_TargetWidth
Target width value.
std::vector< ConvertBf16ToFp32Layer * > InsertConvertBf16ToFp32LayersBefore(Graph &graph, Layer &layer, bool expectCorrectInputType)
A GatherDescriptor for the GatherLayer.
This layer represents a BatchToSpaceNd operation.
std::vector< SubgraphViewPtr > Subgraphs
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
bool m_HalfPixelCenters
Half Pixel Centers.
IConnectableLayer * AddPadLayer(const PadDescriptor &padDescriptor, const char *name=nullptr) override
Adds a pad layer to the network.
IConnectableLayer * AddStandInLayer(const StandInDescriptor &descriptor, const char *name=nullptr) override
Add a stand-in layer for a type unknown to the Arm NN framework.
void SetQuantizationScale(float scale)
This layer represents a ArgMinMax operation.
#define ARMNN_ASSERT(COND)
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
IConnectableLayer * AddActivationLayer(const ActivationDescriptor &activationDescriptor, const char *name=nullptr) override
Adds an activation layer to the network.
BackendIdVector GetAvailablePreferredBackends() const
IConnectableLayer * AddSubtractionLayer(const char *name=nullptr) override
Adds a subtraction layer to the network.
IConnectableLayer * AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor &desc, const char *name=nullptr) override
Adds an instance normalization layer to the network.
Device specific knowledge to be passed to the optimizer.
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
IConnectableLayer * AddDepthToSpaceLayer(const DepthToSpaceDescriptor &depthToSpaceDescriptor, const char *name=nullptr) override
Adds a depth to space layer to the network.
bool Validate(const SubgraphView &originalSubgraph) const
An ActivationDescriptor for the ActivationLayer.
std::vector< ConvertFp32ToBf16Layer * > InsertConvertFp32ToBf16LayersAfter(Graph &graph, Layer &layer)
const BackendId & GetBackendId() const
uint32_t m_TargetHeight
Target height value.
IConnectableLayer * AddStackLayer(const StackDescriptor &stackDescriptor, const char *name=nullptr) override
Adds a stack layer to the network.
This layer represents a floor operation.
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
This layer represents a normalization operation.
virtual MemorySourceFlags GetExportFlags() const
This layer represents a pooling 2d operation.
This layer converts data type Float 32 to Float 16.
OptimizedNetwork(std::unique_ptr< Graph > graph)
This layer represents a transpose operation.
IConnectableLayer * AddSplitterLayer(const ViewsDescriptor &splitterDescriptor, const char *name=nullptr) override
Adds a splitter layer to the network.
This layer represents an addition operation.
QLstmBasicParameters m_BasicParameters
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Status PrintGraph() override
IConnectableLayer * AddSpaceToDepthLayer(const SpaceToDepthDescriptor &spaceToDepthDescriptor, const char *name=nullptr) override
Adds a space to depth layer to the network.
bool CheckScaleSetOnQuantizedType(Layer *layer, Optional< std::vector< std::string > &> errMessages)
Private implementation of INetwork.
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
IConnectableLayer * AddPreluLayer(const char *name=nullptr) override
Adds a PReLU layer to the network.
OptimizeForType< PermuteLayer, PermuteAsReshapeImpl > PermuteAsReshape
bool IsWarningOnly() const
OptimizeForConnection< Layer, TransposeLayer, SquashEqualSiblingsImpl< TransposeLayer > > SquashEqualTransposeSiblings
This layer represents a QLstm operation.
IConnectableLayer * AddQuantizeLayer(const char *name=nullptr) override
Add a quantize layer to the network.
const Substitutions & GetSubstitutions() const
IConnectableLayer * AddArgMinMaxLayer(const ArgMinMaxDescriptor &desc, const char *name=nullptr) override
Adds an ArgMinMax layer to the network.
BackendIdVector m_PreferredBackends
OptimizationResult AssignBackends(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, Graph::Iterator &firstLayer, Graph::Iterator &lastLayer, Optional< std::vector< std::string > &> errMessages)
This layer represents a subtraction operation.
This layer calculates both true and false outputs for input.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
ConvertConstants< Float16ToFloat32, IsFloat32Layer > ConvertConstantsHalfToFloat
bool m_AlignCorners
Aligned corners.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr) override
Adds a batch normalization layer to the network.
static Subgraphs SelectSubgraphs(Graph &graph, const LayerSelectorFunction &selector)
Selects subgraphs from a graph based on the selector function and the algorithm.
IConnectableLayer * AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor &batchToSpaceNdDescriptor, const char *name=nullptr) override
Adds a batch to space ND layer to the network.
This layer represents a L2 normalization operation.
BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry &handleFactoryRegistry, BackendSettings &backendSettings)
OptimizeForConnection< ConvertFp32ToFp16Layer, ConvertFp16ToFp32Layer, OptimizeInverseConversionsImpl > OptimizeInverseConversionsFp32
ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap &backends, OutputSlot &slot, TensorHandleFactoryRegistry ®istry)
const std::string & Get() const
Status SerializeToDot(std::ostream &stream) const override
IConnectableLayer * AddAdditionLayer(const char *name=nullptr) override
Adds an addition layer to the network.
BackendIdSet m_SelectedBackends
IConnectableLayer * AddDequantizeLayer(const char *name=nullptr) override
Adds a Dequantize layer to the network.
IConnectableLayer * AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor &spaceToBatchNdDescriptor, const char *name=nullptr) override
Adds a space to batch layer to the network.
OptimizationResult AttemptBackendAssignment(BackendSettings &backendSettings, Graph &graph, Layer *layer, BackendId backend, DataType dataTypeIn, DataType dataTypeOut, const std::vector< BackendId > &availablePreferredBackends, std::string &reasonIfUnsupported, Optional< std::vector< std::string > &> errMessages)
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
IConnectableLayer * AddLogicalBinaryLayer(const LogicalBinaryDescriptor &logicalBinaryDescriptor, const char *name=nullptr) override
Adds a Logical Binary layer to the network.
void SetTensorInfo(const TensorInfo &tensorInfo) override
IConnectableLayer * AddAbsLayer(const char *name=nullptr) override
Add absolute layer to the network.
A MeanDescriptor for the MeanLayer.
This layer represents a division operation.
IConnectableLayer * AddSliceLayer(const SliceDescriptor &sliceDescriptor, const char *name=nullptr) override
Adds a slice layer to the network.
This layer represents a strided slice operation.
LayerType GetType() const
IConnectableLayer * AddGreaterLayer(const char *name=nullptr) override
Add a Greater layer to the network.
This layer represents a maximum operation.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
IConnectableLayer * AddMergerLayer(const MergerDescriptor &mergerDescriptor, const char *name=nullptr) override
Adds a concat layer to the network.
OptimizeForType< Layer, ConvertFp32NetworkToFp16Impl > Fp32NetworkToFp16Converter
IConnectableLayer * AddResizeLayer(const ResizeDescriptor &resizeDescriptor, const char *name=nullptr) override
Adds a resize layer to the network.
IConnectableLayer * AddConstantLayer(const ConstTensor &input, const char *name=nullptr) override
Adds a layer with no inputs and a single output, which always corresponds to the passed in constant tensor.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr) override
Adds a multiplication layer to the network.
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry ®istry, bool importEnabled, Optional< std::vector< std::string > &> errMessages)
IConnectableLayer * AddComparisonLayer(const ComparisonDescriptor &comparisonDescriptor, const char *name=nullptr) override
Add a Comparison layer to the network.
void ReportWarning(const std::string &warningMessage, Optional< std::vector< std::string > &> warningMessages)
This layer represents a convolution 2d operation.
This layer converts data type Float32 to BFloat16.
bool CheckFlag(MemorySourceFlags flags, MemorySource source)
void SetQuantizationOffset(int32_t offset)
OptimizationResult ApplyBackendOptimizations(OptimizedNetwork *optNetObjPtr, BackendSettings &backendSettings, BackendsMap &backends, const ModelOptions &modelOptions, Optional< std::vector< std::string > &> errMessages)
static INetwork * CreateRaw(NetworkOptions networkOptions={})
This layer represents a mean operation.
This layer represents a comparison operation.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
IConnectableLayer * AddNormalizationLayer(const NormalizationDescriptor &normalizationDescriptor, const char *name=nullptr) override
Adds a normalization layer to the network.
OptimizeForType< Layer, AddBroadcastReshapeLayerImpl > AddBroadcastReshapeLayer
std::unique_ptr< ScopedCpuTensorHandle > m_Weight
A unique pointer to store Weight values.
A Pooling2dDescriptor for the Pooling2dLayer.
This layer dequantizes the input tensor.
A NormalizationDescriptor for the NormalizationLayer.
IConnectableLayer * AddPermuteLayer(const PermuteDescriptor &permuteDescriptor, const char *name=nullptr) override
Adds a permute layer to the network.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
This layer represents a multiplication operation.
IConnectableLayer * AddGatherLayer(const char *name=nullptr) override
Add Gather layer to the network.
IConnectableLayer * AddSoftmaxLayer(const SoftmaxDescriptor &softmaxDescriptor, const char *name=nullptr) override
Adds a softmax layer to the network.
IConnectableLayer * AddLstmLayer(const LstmDescriptor &descriptor, const LstmInputParams ¶ms, const char *name=nullptr) override
Add a Lstm layer to the network.
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
IConnectableLayer * AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor &elementwiseUnaryDescriptor, const char *name=nullptr) override
Add an ElementwiseUnary layer to the network.
const TensorInfo & GetTensorInfo() const override
static INetworkPtr Create(NetworkOptions networkOptions={})
IConnectableLayer * AddDivisionLayer(const char *name=nullptr) override
Adds a division layer to the network.
EdgeStrategy CalculateEdgeStrategy(BackendsMap &backends, ITensorHandleFactory::FactoryId srcFactoryId, const Layer &layer, const Layer &connectedLayer, TensorHandleFactoryRegistry ®istry, bool importEnabled)
static void Destroy(IOptimizedNetwork *network)
virtual MemorySourceFlags GetImportFlags() const
std::unique_ptr< ScopedCpuTensorHandle > m_InputToForgetWeights
A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
OptimizeForType< Layer, ConvertFp32NetworkToBf16Impl > Fp32NetworkToBf16Converter
IConnectableLayer * AddStridedSliceLayer(const StridedSliceDescriptor &stridedSliceDescriptor, const char *name=nullptr) override
Adds a strided slice layer to the network.
A SoftmaxDescriptor for the SoftmaxLayer.
bool IsCpuRefUsed() const
IConnectableLayer * AddQuantizedLstmLayer(const QuantizedLstmInputParams ¶ms, const char *name=nullptr) override
Add a QuantizedLstm layer to the network.
static const FactoryId LegacyFactoryId
This layer represents a fill operation.
IConnectableLayer * AddDetectionPostProcessLayer(const DetectionPostProcessDescriptor &descriptor, const ConstTensor &anchors, const char *name=nullptr) override
Adds a Detection PostProcess layer to the network.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
This layer represents a DepthToSpace operation.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
unsigned int GetNumElements() const
Network(NetworkOptions networkOptions={})
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap
This layer represents a resize operation.
A PermuteDescriptor for the PermuteLayer.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })