#include <common/include/ProfilingGuid.hpp>
#include <fmt/format.h>

return pNetworkImpl->AddComparisonLayer(comparisonDescriptor, name);
return pNetworkImpl->AddConcatLayer(concatDescriptor, name);
return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, ...);
return pNetworkImpl->AddConvolution3dLayer(convolution3dDescriptor, name);
return pNetworkImpl->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
return pNetworkImpl->AddFillLayer(fillDescriptor, name);
return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, name);
return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, ...);
return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
return pNetworkImpl->AddPermuteLayer(permuteDescriptor, name);
return pNetworkImpl->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
return pNetworkImpl->AddActivationLayer(activationDescriptor, name);
return pNetworkImpl->AddNormalizationLayer(normalizationDescriptor, name);
return pNetworkImpl->AddSliceLayer(sliceDescriptor, name);
return pNetworkImpl->AddSoftmaxLayer(softmaxDescriptor, name);
return pNetworkImpl->AddSplitterLayer(splitterDescriptor, name);
return pNetworkImpl->AddBatchNormalizationLayer(desc, mean, variance, beta, gamma, name);
return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
return pNetworkImpl->AddReduceLayer(reduceDescriptor, name);
return pNetworkImpl->AddInstanceNormalizationLayer(desc, name);
return pNetworkImpl->AddL2NormalizationLayer(desc, name);
return pNetworkImpl->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
return pNetworkImpl->AddReshapeLayer(reshapeDescriptor, name);
return pNetworkImpl->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
return pNetworkImpl->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
return pNetworkImpl->AddLstmLayer(descriptor, params, name);
return pNetworkImpl->AddMeanLayer(meanDescriptor, name);
return pNetworkImpl->AddStridedSliceLayer(stridedSliceDescriptor, name);
return pNetworkImpl->AddTransposeConvolution2dLayer(descriptor, weights, biases, name);
return pNetworkImpl->AddTransposeLayer(transposeDescriptor, name);
return pNetworkImpl->AddQuantizedLstmLayer(params, name);
return pNetworkImpl->AddQLstmLayer(descriptor, params, name);
return pNetworkImpl->AddLogicalBinaryLayer(descriptor, name);
return pNetworkImpl->AddUnidirectionalSequenceLstmLayer(descriptor, params, name);
return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);

void INetwork::Accept(ILayerVisitor& visitor) const
...

return new INetwork(networkOptions);

: pOptimizedNetworkImpl(new OptimizedNetworkImpl(*other.pOptimizedNetworkImpl.get(), modelOptions)) {}
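The INetwork methods above simply forward to the private NetworkImpl held in pNetworkImpl. A minimal usage sketch of this public facade, with illustrative layer names and shapes (not taken from this file):

#include <armnn/ArmNN.hpp>

int main()
{
    armnn::INetworkPtr net = armnn::INetwork::Create();

    armnn::ActivationDescriptor reluDesc;
    reluDesc.m_Function = armnn::ActivationFunction::ReLu;

    armnn::IConnectableLayer* input  = net->AddInputLayer(0, "input");
    armnn::IConnectableLayer* relu   = net->AddActivationLayer(reluDesc, "relu");
    armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");

    // Wire the layers together via their output/input slots.
    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Every output slot needs a TensorInfo before the network can be optimized.
    armnn::TensorInfo info(armnn::TensorShape({1, 8}), armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    relu->GetOutputSlot(0).SetTensorInfo(info);
    return 0;
}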
return m_Graph->SerializeToDot(stream);
return m_Graph->GetNumInputs();
return m_Graph->GetNumOutputs();

void ReportError(const std::string& errorMessage,
                 Optional<std::vector<std::string>&> errorMessages)
{
    std::stringstream fullErrorMessage;
    fullErrorMessage << "ERROR: " << errorMessage;
    ...
    errorMessages.value().push_back(fullErrorMessage.str());
}

void ReportWarning(const std::string& warningMessage,
                   Optional<std::vector<std::string>&> warningMessages)
{
    std::stringstream fullWarningMessage;
    fullWarningMessage << "WARNING: " << warningMessage;
    ...
    warningMessages.value().push_back(fullWarningMessage.str());
}
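ReportError and ReportWarning append to a caller-owned vector handed in through armnn::Optional. A small sketch of providing that message sink (the message text is illustrative):

#include <armnn/Optional.hpp>
#include <string>
#include <vector>

std::vector<std::string> errors;
armnn::Optional<std::vector<std::string>&> errorSink(errors);

if (errorSink.has_value())
{
    // This is what ReportError does above; messages accumulate in the caller's vector.
    errorSink.value().push_back("ERROR: example message");
}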
OptimizationResult ReturnWithError(OptimizationResult res,
                                   const Layer* layer,
                                   const BackendSettings& backendSettings,
                                   Optional<std::vector<std::string>&> errMessages)
{
    std::stringstream failureMsg;
    ...
}

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    ...
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        ...
        std::stringstream ss;
        ss << ... << " (" << layer->GetNameStr() << ") is of type"
           << " Quantized 8 bit but its scale parameter has not been set";
        ...
        std::stringstream ss;
        ss << "Quantization parameters for Softmax layer (Scale: " << ...
           << ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
template <typename LayerT>
LayerT* ConvertBf16ToFp32Weight(Layer* l)
{
    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
    ...
    FloatingPointConverter::ConvertBFloat16ToFloat32(
        layer->m_Weight->template GetConstTensor<armnn::BFloat16>(),
        info.GetNumElements(), newValues.data());
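ConvertBf16ToFp32Weight widens constant BFloat16 weights back to float32. BFloat16 is the upper 16 bits of an IEEE-754 float32, so a standalone sketch of the per-element conversion (not ArmNN's FloatingPointConverter) is a 16-bit shift:

#include <cstdint>
#include <cstring>

// Widen one BFloat16 value (raw bits in a uint16_t) to float32 by placing its
// 16 bits in the upper half of a 32-bit word.
float Bf16ToFp32(uint16_t bf16)
{
    uint32_t bits = static_cast<uint32_t>(bf16) << 16;
    float result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
}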
OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                            Graph& graph,
                                            Layer* layer,
                                            BackendId backend,
                                            DataType dataTypeIn,
                                            DataType dataTypeOut,
                                            const std::vector<BackendId>& availablePreferredBackends,
                                            std::string& reasonIfUnsupported,
                                            Optional<std::vector<std::string>&> errMessages)
{
    auto ReturnError = [&](const Layer* layer)
    {
        ...
    };

    auto ConstantLayerFromFp16ToFp32 = [](Layer& layer)
    {
        ConstantLayer* constantLayer = PolymorphicDowncast<ConstantLayer*>(&layer);
        ...
        std::vector<float> newValues(info.GetNumElements());
        ...
        info.GetNumElements(),
        ...
    };

    bool checkType = false;
    ...
    auto connectedOutputSlot = inputSlot.GetConnectedOutputSlot();
    ...
    if (connectedOutputSlot->GetNumConnections() == 1)
    {
        ...
        ConstantLayerFromFp16ToFp32(connectedOutputSlot->GetOwningLayer());
    }

    std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers;
    convertFp16ToFp32Layers = InsertConvertFp16ToFp32LayersBefore(...);
    ...
    std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers;
    convertFp32ToFp16Layers = InsertConvertFp32ToFp16LayersAfter(...);

    auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
    {
        bool supportedBackendFound = false;
        std::string reasonIfUnsupported;
        ...
        supportedBackendFound = true;
        ...
        for (const auto& backend : availablePreferredBackends)
        {
            if (backend == preferredBackend)
            {
                continue;
            }
            ...
            supportedBackendFound = true;
            ...
        }
        return supportedBackendFound;
    };

    if (!AssignFirstSupportedBackend(convertLayer, backend))
    {
        return ReturnError(convertLayer);
    }
    ...
    if (!AssignFirstSupportedBackend(convertLayer, backend))
    {
        return ReturnError(convertLayer);
    }
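AssignFirstSupportedBackend tries the preferred backend first and then walks the remaining available backends. A self-contained sketch of that fallback policy (names and the isSupported callback are illustrative, not ArmNN API):

#include <functional>
#include <string>
#include <vector>

bool AssignFirstSupported(const std::string& preferred,
                          const std::vector<std::string>& available,
                          const std::function<bool(const std::string&)>& isSupported,
                          std::string& chosen)
{
    if (isSupported(preferred))
    {
        chosen = preferred;
        return true;
    }
    for (const auto& backend : available)
    {
        if (backend == preferred)
        {
            continue;   // already tried above
        }
        if (isSupported(backend))
        {
            chosen = backend;
            return true;
        }
    }
    return false;   // no backend supports the layer
}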
std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
convertBf16ToFp32Layers = InsertConvertBf16ToFp32LayersBefore(...);
...
ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
...
ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
...
std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
convertFp32ToBf16Layers = InsertConvertFp32ToBf16LayersAfter(...);

auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
{
    bool supportedBackendFound = false;
    std::string reasonIfUnsupported;
    ...
    supportedBackendFound = true;
    ...
    for (const auto& backend : availablePreferredBackends)
    {
        if (backend == preferredBackend)
        {
            continue;
        }
        ...
        supportedBackendFound = true;
        ...
    }
    return supportedBackendFound;
};

if (!AssignFirstSupportedBackend(convertLayer, backend))
{
    return ReturnError(convertLayer);
}
...
if (!AssignFirstSupportedBackend(convertLayer, backend))
{
    return ReturnError(convertLayer);
}

std::stringstream warningMsg;
warningMsg << ... << " is not supported on requested backend " << layer->GetBackendId().Get()
           << " (reason: " << reasonIfUnsupported
           << "), falling back to the next backend.";
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                  BackendSettings& backendSettings,
                                  Graph::Iterator& firstLayer,
                                  Graph::Iterator& lastLayer,
                                  Optional<std::vector<std::string>&> errMessages)
{
    auto ReturnError = [&](const Layer* layer) { ... };

    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "No preferred backends are available";
        ...
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        ... layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
        ... layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
        std::string reasonIfUnsupported;

        if (layer->GetBackendHint().has_value() && ...)
        {
            ... layer->GetBackendHint().value(), ... availablePreferredBackends, ...
        }

        for (const auto& backend : availablePreferredBackends)
        {
            if (layer->GetBackendHint().has_value() &&
                layer->GetBackendHint().value() == backend)
            {
                continue;
            }
            ... availablePreferredBackends, ...
        }
        ...
        layer->SetBackendId(cpuBackendId);
        ...
        return ReturnError(layer);
    }

    for (auto it = firstLayer; it != lastLayer; ++it)
    {
        ...
        BackendId connectedBackendId = layer->GetOutputSlot(0).GetConnection(0)->GetOwningLayer().GetBackendId();
        layer->SetBackendId(connectedBackendId);
    }
}

BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRegistry,
                                    BackendSettings& backendSettings)
{
    ...
    auto backendFactory = backendRegistry.GetFactory(selectedBackend);
    auto backendObjPtr = backendFactory();
    ...
    backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
    backends[backendObjPtr->GetId()] = std::move(backendObjPtr);
OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
                                             BackendSettings& backendSettings,
                                             BackendsMap& backends,
                                             const ModelOptions& modelOptions,
                                             Optional<std::vector<std::string>&> errMessages)
{
    ...
    auto backendObjPtr = backends.find(selectedBackend)->second.get();
    ...
    [&backendObjPtr](const Layer& layer)
    ...
    if (subgraphs.empty())
    {
        ...
    }

    for (auto& subgraph : subgraphs)
    {
        OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
        ...
        for (auto& substitution : optimizationViews.GetSubstitutions())
        {
            SubgraphView& replacementSubgraph   = substitution.m_ReplacementSubgraph;
            SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
            ...
            std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
            {
                l->SetBackendId(selectedBackend);
            });
        }

        if (!optimizationViews.GetFailedSubgraphs().empty())
        {
            std::stringstream warningMsg;
            warningMsg << "Some sub-graph(s) failed to optimized on " << backendObjPtr->GetId() << " backend.";
            ...
            if (!backendObjPtr->GetId().IsCpuRef())
            {
                settingsCopy.m_IgnoredBackends.insert(backendObjPtr->GetId());
            }

            for (auto& failedSubgraph : optimizationViews.GetFailedSubgraphs())
            {
                std::stringstream subgraphMsg;
                subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
                            << " layers inside sub-graph " << count++;
                ...
                if (reassignmentResult.m_Error)
                {
                    ...
if (srcFactory && dstFactory && ...)
...

if (frmBackend == backends.end() ||
    !frmBackend->second->SupportsTensorAllocatorAPI())
{
    ...
}

std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
...
const Layer& connectedLayer = connection->GetOwningLayer();

auto toBackend = backends.find(connectedLayer.GetBackendId());
ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
{
    ...
}

auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
for (auto&& dst : dstPrefs)
{
    auto it = factoryScores.find(dst);
    if (it == factoryScores.end())
    {
        factoryScores[dst] = 0;
    }
    ...
    factoryScores[dst]++;
    ...
    if (factoryScores[dst] > topScore)
    {
        topScore = factoryScores[dst];
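The slot-option calculation above scores each destination handle-factory preference and keeps the one with the highest count. A standalone sketch of that voting idea (illustrative types, not ArmNN API):

#include <map>
#include <string>
#include <vector>

// Every consumer contributes one vote per handle factory it can accept; the
// factory with the most votes wins.
std::string PickMostPreferredFactory(const std::vector<std::vector<std::string>>& consumerPrefs)
{
    std::map<std::string, int> factoryScores;
    std::string best;
    int topScore = 0;
    for (const auto& prefs : consumerPrefs)
    {
        for (const auto& factory : prefs)
        {
            int score = ++factoryScores[factory];
            if (score > topScore)
            {
                topScore = score;
                best = factory;
            }
        }
    }
    return best;
}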
if (frmBackend == backends.end() ||
    !frmBackend->second->SupportsTensorAllocatorAPI())
{
    ...
}

bool outputConnection = false;
...
const Layer& connectedLayer = connection->GetOwningLayer();
...
outputConnection = true;
...

std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
for (auto&& pref : srcPrefs)
{
    if (outputConnection)
    {
        bool fallbackConnection = false;
        ...
        if (inputSlot.GetConnectedOutputSlot()->GetOwningLayer().GetBackendId() != layer.GetBackendId())
        {
            fallbackConnection = true;
        }
        if (fallbackConnection)
        {
            ...
            if (!factoryCap.empty())
            {
                ...
            }
        }
    }

    if (!outputConnection)
    {
        ...
        if (!factoryCap.empty())
        {
            ...
        }
    }
    ...
    auto it = factoryScores.find(pref);
    if (it == factoryScores.end())
    {
        factoryScores[pref] = 0;
    }
}
...
const Layer& connectedLayer = connection->GetOwningLayer();

auto toBackend = backends.find(connectedLayer.GetBackendId());
ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
for (auto&& src : srcPrefs)
{
    if (factoryScores.find(src) == factoryScores.end())
    {
        ...
    }
    for (auto&& dst : dstPrefs)
    {
        ...
        factoryScores[src]++;
    }
}

int minScore = std::numeric_limits<int>::max();
for (auto it : factoryScores)
{
    minScore = std::min(minScore, it.second);
}

std::vector<ITensorHandleFactory::FactoryId> optimalFactories;
for (auto it : factoryScores)
{
    if (it.second == minScore)
    {
        optimalFactories.push_back(it.first);
    }
}

for (auto&& srcPref : srcPrefs)
{
    for (auto&& comp : optimalFactories)
    {
        if (comp == srcPref)
        {
            ...
EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends, ITensorHandleFactory::FactoryId srcFactoryId,
                                   const Layer& layer, const Layer& connectedLayer,
                                   TensorHandleFactoryRegistry& registry, bool importEnabled)
{
    auto toBackend = backends.find(connectedLayer.GetBackendId());
    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");

    auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
    ...
    for (auto&& pref : dstPrefs)
    {
        if (pref == srcFactoryId)
        {
            ...
        }
    }
    ...
    for (auto&& pref : dstPrefs)
    {
        ...
        if (srcCapability.empty() && dstCapability.empty() && srcFallback.empty() && dstFallback.empty())
        {
            ...
        }
    }
    ...
    for (auto&& pref : dstPrefs)
    {
        ...

OptimizationResult SelectTensorHandleStrategy(Graph& optGraph, BackendsMap& backends,
                                              TensorHandleFactoryRegistry& registry, bool importEnabled,
                                              Optional<std::vector<std::string>&> errMessages)
{
    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
    {
        ...
        unsigned int connectionIdx = 0;
        ...
        const Layer& connectedLayer = connection->GetOwningLayer();
        ...
        registry, importEnabled);
        ...
        errMessages.value().emplace_back("Could not find valid strategy required for compatibility"
                                         " between backends.");
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages)
{
    auto profiler = inNetwork.pNetworkImpl->GetGraph().GetProfiler();
    ...
    if (backendPreferences.empty())
    {
        ...
    }

    inNetwork.pNetworkImpl->GetGraph().VerifyConstantLayerSetTensorInfo();

    std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph());

    using namespace optimizations;
    ...
    std::stringstream failureMsg;
    failureMsg << "None of the preferred backends " << backendPreferences
    ...
    if (assignBackendsResult.m_Error)
    {
        ...
    }
    ...
    if (backendOptimizationResult.m_Error)
    {
        ...
    }
    ...
    tensorHandleFactoryRegistry,
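A hedged usage sketch of the Optimize entry point above: pass the backend preference list and capture any messages it reports. It assumes a previously built INetwork and that the listed backends exist in the build:

#include <armnn/ArmNN.hpp>
#include <string>
#include <vector>

armnn::IOptimizedNetworkPtr OptimizeForCpu(const armnn::INetwork& network)
{
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    std::vector<std::string> messages;   // filled with errors/warnings from the optimizer
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(network,
                        {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
                        runtime->GetDeviceSpec(),
                        armnn::OptimizerOptions(),
                        armnn::Optional<std::vector<std::string>&>(messages));
    return optNet;
}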
bool NetworkImpl::GetShapeInferenceMethod()
{
    if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
    {
        return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
    }
    ...
}

NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
    : m_NetworkOptions(networkOptions),
      m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
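A hedged sketch of how this check is driven from the caller's side: NetworkOptions[0] carries the backend id "ShapeInferenceMethod" with a single bool option. The option name "InferAndValidate" is an assumption here, since the code above only reads the value, not the name:

#include <armnn/ArmNN.hpp>

armnn::NetworkOptions networkOptions;
networkOptions.emplace_back(
    armnn::BackendOptions("ShapeInferenceMethod",
                          {{"InferAndValidate", true}}));    // option name assumed
armnn::INetworkPtr network = armnn::INetwork::Create(networkOptions);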
return m_Graph->AddLayer<InputLayer>(id, name);
return m_Graph->AddLayer<CastLayer>(name);
return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);

unsigned int numInputs = fullyConnectedDescriptor.GetNumInputs();
...
weightsInfo.SetConstant();
...
biasInfo.SetConstant();
...
if (fullyConnectedDescriptor.m_BiasEnabled && numInputs == 3)
{
    ...
}
else if (!fullyConnectedDescriptor.m_BiasEnabled && numInputs == 2)
{
    layer->m_Bias = nullptr;
}
else
{
    throw InvalidArgumentException(fmt::format(
        "AddFullyConnectedLayer: Value mismatch. When bias is enabled in the "
        "descriptor the number of inputs is expected to be 3 otherwise 2. "
        "BiasEnabled={}, numInputs={}",
        fullyConnectedDescriptor.m_BiasEnabled,
        numInputs));
}

return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
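A hedged sketch of the three-input pattern this validation enforces (bias enabled means data, weights and bias inputs). It assumes an armnn::INetworkPtr net as in the earlier sketch, with weights and bias supplied as constant layers; shapes and descriptor flags are illustrative:

armnn::FullyConnectedDescriptor fcDesc;
fcDesc.m_BiasEnabled = true;

armnn::TensorInfo weightsInfo(armnn::TensorShape({8, 16}), armnn::DataType::Float32);
armnn::TensorInfo biasInfo(armnn::TensorShape({8}), armnn::DataType::Float32);
weightsInfo.SetConstant();
biasInfo.SetConstant();
std::vector<float> weightsData(8 * 16, 0.1f);
std::vector<float> biasData(8, 0.0f);

armnn::IConnectableLayer* fc      = net->AddFullyConnectedLayer(fcDesc, "fc");
armnn::IConnectableLayer* weights = net->AddConstantLayer(armnn::ConstTensor(weightsInfo, weightsData), "weights");
armnn::IConnectableLayer* bias    = net->AddConstantLayer(armnn::ConstTensor(biasInfo, biasData), "bias");

weights->GetOutputSlot(0).SetTensorInfo(weightsInfo);
bias->GetOutputSlot(0).SetTensorInfo(biasInfo);
// Input 0 comes from the preceding layer; weights and bias feed inputs 1 and 2.
weights->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
bias->GetOutputSlot(0).Connect(fc->GetInputSlot(2));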
const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);

if (convolution2dDescriptor.m_BiasEnabled)
{
    layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
...

return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
...

layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);

if (convolution2dDescriptor.m_BiasEnabled)
{
    layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
...

return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
...

layer->m_Anchors = std::make_shared<ScopedTensorHandle>(anchors);
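A hedged sketch of the ConstTensor-based convolution overload implemented above, again assuming an INetworkPtr net; descriptor fields and shapes are illustrative, and EmptyOptional() stands in for "no bias":

armnn::Convolution2dDescriptor convDesc;
convDesc.m_StrideX = 1;
convDesc.m_StrideY = 1;
convDesc.m_BiasEnabled = false;
convDesc.m_DataLayout = armnn::DataLayout::NHWC;

armnn::TensorInfo kernelInfo(armnn::TensorShape({16, 3, 3, 3}), armnn::DataType::Float32);
kernelInfo.SetConstant();
std::vector<float> kernelData(16 * 3 * 3 * 3, 0.01f);

armnn::IConnectableLayer* conv =
    net->AddConvolution2dLayer(convDesc,
                               armnn::ConstTensor(kernelInfo, kernelData),
                               armnn::EmptyOptional(),    // Optional<ConstTensor> bias
                               "conv1");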
return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);

layer->m_Mean     = std::make_shared<ScopedTensorHandle>(mean);
layer->m_Variance = std::make_shared<ScopedTensorHandle>(variance);
layer->m_Beta     = std::make_shared<ScopedTensorHandle>(beta);
layer->m_Gamma    = std::make_shared<ScopedTensorHandle>(gamma);

return m_Graph->AddLayer<RankLayer>(name);
return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);

layer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(input);

return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
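AddConstantLayer stores the passed ConstTensor as the layer's m_LayerOutput (see above); the output slot still needs its TensorInfo set before the graph is optimized. A small sketch, assuming the same net as before:

armnn::TensorInfo constInfo(armnn::TensorShape({1, 4}), armnn::DataType::Float32);
constInfo.SetConstant();
std::vector<float> constData = {1.0f, 2.0f, 3.0f, 4.0f};

armnn::IConnectableLayer* constant =
    net->AddConstantLayer(armnn::ConstTensor(constInfo, constData), "const");
constant->GetOutputSlot(0).SetTensorInfo(constInfo);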
const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);
...
layer->m_BasicParameters.m_InputToCellWeights = ...;
layer->m_BasicParameters.m_InputToOutputWeights = ...;
layer->m_BasicParameters.m_RecurrentToForgetWeights = ...;
layer->m_BasicParameters.m_RecurrentToCellWeights = ...;
layer->m_BasicParameters.m_RecurrentToOutputWeights = ...;
layer->m_BasicParameters.m_ForgetGateBias = ...;
layer->m_BasicParameters.m_CellBias =
    std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias = ...;

if (!descriptor.m_CifgEnabled)
{
    ... "when CIFG is disabled.");
    ... "AddLstmLayer: Recurrent To Input Weights cannot be NULL "
        "when CIFG is disabled.");
    ... "when CIFG is disabled.");

    layer->m_CifgParameters.m_InputToInputWeights = ...;
    layer->m_CifgParameters.m_RecurrentToInputWeights = ...;
    layer->m_CifgParameters.m_InputGateBias = ...;
}

if (descriptor.m_ProjectionEnabled)
{
    ... "when projection is enabled.");

    layer->m_ProjectionParameters.m_ProjectionWeights = ...;
    layer->m_ProjectionParameters.m_ProjectionBias = ...;
}

if (descriptor.m_PeepholeEnabled)
{
    if (!descriptor.m_CifgEnabled)
    {
        ... "when Peephole is enabled and CIFG disabled.");
        layer->m_PeepholeParameters.m_CellToInputWeights = ...;
    }
    ... "when Peephole is enabled.");
    ... "when Peephole is enabled.");

    layer->m_PeepholeParameters.m_CellToForgetWeights = ...;
    layer->m_PeepholeParameters.m_CellToOutputWeights = ...;
}

if (descriptor.m_LayerNormEnabled)
{
    if (!descriptor.m_CifgEnabled)
    {
        ... "when layer normalization is enabled and CIFG disabled.");
        layer->m_LayerNormParameters.m_InputLayerNormWeights = ...;
    }
    ... "when layer normalization is enabled.");
    ... "when layer normalization is enabled.");
    ... "when layer normalization is enabled.");

    layer->m_LayerNormParameters.m_ForgetLayerNormWeights = ...;
    layer->m_LayerNormParameters.m_CellLayerNormWeights = ...;
    layer->m_LayerNormParameters.m_OutputLayerNormWeights = ...;
}
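A hedged sketch of filling LstmInputParams for the mandatory "basic" tensors copied above, with CIFG enabled so the optional input-gate tensors can be omitted. LstmInputParams holds non-owning ConstTensor pointers, so each referenced tensor (assumed to be a previously created armnn::ConstTensor) must outlive the AddLstmLayer call:

armnn::LstmDescriptor lstmDesc;
lstmDesc.m_CifgEnabled = true;

armnn::LstmInputParams params;
params.m_InputToForgetWeights     = &inputToForgetWeights;
params.m_InputToCellWeights       = &inputToCellWeights;
params.m_InputToOutputWeights     = &inputToOutputWeights;
params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
params.m_ForgetGateBias           = &forgetGateBias;
params.m_CellBias                 = &cellBias;
params.m_OutputGateBias           = &outputGateBias;

armnn::IConnectableLayer* lstm = net->AddLstmLayer(lstmDesc, params, "lstm");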
return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);

layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);

if (descriptor.m_BiasEnabled)
{
    layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
...

return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);

layer->m_QuantizedLstmParameters.m_InputToForgetWeights = ...;
layer->m_QuantizedLstmParameters.m_InputToCellWeights = ...;
layer->m_QuantizedLstmParameters.m_InputToOutputWeights = ...;
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = ...;
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = ...;
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = ...;
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = ...;
layer->m_QuantizedLstmParameters.m_InputGateBias = ...;
layer->m_QuantizedLstmParameters.m_ForgetGateBias = ...;
layer->m_QuantizedLstmParameters.m_CellBias =
    std::make_shared<ScopedTensorHandle>(params.GetCellBias());
layer->m_QuantizedLstmParameters.m_OutputGateBias = ...;
const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);
...
layer->m_BasicParameters.m_InputToCellWeights = ...;
layer->m_BasicParameters.m_InputToOutputWeights = ...;
layer->m_BasicParameters.m_RecurrentToForgetWeights = ...;
layer->m_BasicParameters.m_RecurrentToCellWeights = ...;
layer->m_BasicParameters.m_RecurrentToOutputWeights = ...;
layer->m_BasicParameters.m_ForgetGateBias = ...;
layer->m_BasicParameters.m_CellBias =
    std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias = ...;

if (!descriptor.m_CifgEnabled)
{
    ...
    ... "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
    ...
    layer->m_CifgParameters.m_InputToInputWeights = ...;
    layer->m_CifgParameters.m_RecurrentToInputWeights = ...;
    layer->m_CifgParameters.m_InputGateBias = ...;
}

if (descriptor.m_ProjectionEnabled)
{
    ...
    layer->m_ProjectionParameters.m_ProjectionWeights = ...;
    ...
    layer->m_ProjectionParameters.m_ProjectionBias = ...;
}

if (descriptor.m_PeepholeEnabled)
{
    ...
    if (!descriptor.m_CifgEnabled)
    {
        ...
        layer->m_PeepholeParameters.m_CellToInputWeights = ...;
    }
    layer->m_PeepholeParameters.m_CellToForgetWeights = ...;
    layer->m_PeepholeParameters.m_CellToOutputWeights = ...;
}

if (descriptor.m_LayerNormEnabled)
{
    ...
    if (!descriptor.m_CifgEnabled)
    {
        ...
        layer->m_LayerNormParameters.m_InputLayerNormWeights = ...;
    }
    layer->m_LayerNormParameters.m_ForgetLayerNormWeights = ...;
    layer->m_LayerNormParameters.m_CellLayerNormWeights = ...;
    layer->m_LayerNormParameters.m_OutputLayerNormWeights = ...;
}
layer->m_BasicParameters.m_InputToCellWeights = ...;
layer->m_BasicParameters.m_InputToOutputWeights = ...;
layer->m_BasicParameters.m_RecurrentToForgetWeights = ...;
layer->m_BasicParameters.m_RecurrentToCellWeights = ...;
layer->m_BasicParameters.m_RecurrentToOutputWeights = ...;
layer->m_BasicParameters.m_ForgetGateBias = ...;
layer->m_BasicParameters.m_CellBias =
    std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias = ...;

if (!descriptor.m_CifgEnabled)
{
    ... "when CIFG is disabled.");
    ... "AddUnidirectionalSequenceLstmLayer: Recurrent To Input Weights cannot be NULL "
        "when CIFG is disabled.");
    ... "when CIFG is disabled.");

    layer->m_CifgParameters.m_InputToInputWeights = ...;
    layer->m_CifgParameters.m_RecurrentToInputWeights = ...;
    layer->m_CifgParameters.m_InputGateBias = ...;
}

if (descriptor.m_ProjectionEnabled)
{
    ... "when projection is enabled.");

    layer->m_ProjectionParameters.m_ProjectionWeights = ...;
    layer->m_ProjectionParameters.m_ProjectionBias = ...;
}

if (descriptor.m_PeepholeEnabled)
{
    if (!descriptor.m_CifgEnabled)
    {
        ... "cannot be NULL when Peephole is enabled and CIFG disabled.");
        layer->m_PeepholeParameters.m_CellToInputWeights = ...;
    }
    ... "when Peephole is enabled.");
    ... "when Peephole is enabled.");

    layer->m_PeepholeParameters.m_CellToForgetWeights = ...;
    layer->m_PeepholeParameters.m_CellToOutputWeights = ...;
}

if (descriptor.m_LayerNormEnabled)
{
    if (!descriptor.m_CifgEnabled)
    {
        ... "cannot be NULL when layer normalization is enabled and CIFG disabled.");
        layer->m_LayerNormParameters.m_InputLayerNormWeights = ...;
    }
    ... "cannot be NULL when layer normalization is enabled.");
    ... "cannot be NULL when layer normalization is enabled.");
    ... "cannot be NULL when layer normalization is enabled.");

    layer->m_LayerNormParameters.m_ForgetLayerNormWeights = ...;
    layer->m_LayerNormParameters.m_CellLayerNormWeights = ...;
    layer->m_LayerNormParameters.m_OutputLayerNormWeights = ...;
}
layer->Accept(visitor);
...
layer->ExecuteStrategy(strategy);

OptimizedNetworkImpl::OptimizedNetworkImpl(const OptimizedNetworkImpl& other, const ModelOptions& modelOptions)
    : m_Graph(new Graph(*other.m_Graph.get()))
    ...
    , m_ModelOptions(modelOptions)
{
}

: m_Graph(std::move(graph)),
  m_Guid(profiling::ProfilingService::GetNextGuid()),
  m_ModelOptions(modelOptions)