ArmNN 23.05 — INetwork.hpp (source listing; see the ArmNN API documentation for this file).
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#pragma once

#include <armnn/BackendOptions.hpp>
#include <armnn/Deprecated.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/IStrategy.hpp>
#include <armnn/NetworkFwd.hpp>
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Logging.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <memory>
#include <sstream>
#include <vector>
19 
20 namespace armnn
21 {
22 /// @brief An input connection slot for a layer.
23 /// The input slot can be connected to an output slot of the preceding layer in the graph.
24 /// Only one connection to the input slot is allowed.
26 {
27 public:
28  virtual const IOutputSlot* GetConnection() const = 0;
29  virtual IOutputSlot* GetConnection() = 0;
30  virtual const IConnectableLayer& GetOwningIConnectableLayer() const = 0;
32  virtual unsigned int GetSlotIndex() const = 0;
33 
34 protected:
35  /// Not user deletable.
37 };
38 
39 /// @brief An output connection slot for a layer.
40 /// The output slot may be connected to 1 or more input slots of subsequent layers in the graph.
42 {
43 public:
44  virtual unsigned int GetNumConnections() const = 0;
45  virtual const IInputSlot* GetConnection(unsigned int index) const = 0;
46  virtual IInputSlot* GetConnection(unsigned int outputindex) = 0;
47 
48  virtual void SetTensorInfo(const TensorInfo& tensorInfo) = 0;
49  virtual const TensorInfo& GetTensorInfo() const = 0;
50  virtual bool IsTensorInfoSet() const = 0;
51 
52  virtual int Connect(IInputSlot& destination) = 0;
53  virtual void Disconnect(IInputSlot& slot) = 0;
54 
55  virtual unsigned int CalculateIndexOnOwner() const = 0;
56 
57  virtual LayerGuid GetOwningLayerGuid() const = 0;
58 
59  virtual const IConnectableLayer& GetOwningIConnectableLayer() const = 0;
61 
62 protected:
63  /// Not user deletable.
65 };
66 
67 /// @brief Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
69 {
70 public:
71  /// Returns the name of the layer
72  virtual const char* GetName() const = 0;
73 
74  /// Returns the number of connectable input slots
75  virtual unsigned int GetNumInputSlots() const = 0;
76 
77  /// Returns the number of connectable output slots
78  virtual unsigned int GetNumOutputSlots() const = 0;
79 
80  /// Get a const input slot handle by slot index
81  virtual const IInputSlot& GetInputSlot(unsigned int index) const = 0;
82 
83  /// Get the input slot handle by slot index
84  virtual IInputSlot& GetInputSlot(unsigned int index) = 0;
85 
86  /// Get the const output slot handle by slot index
87  virtual const IOutputSlot& GetOutputSlot(unsigned int index) const = 0;
88 
89  /// Get the output slot handle by slot index
90  virtual IOutputSlot& GetOutputSlot(unsigned int index) = 0;
91 
92  /// Infer the shape of the output(s) based on the provided input shape(s)
93  virtual std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const = 0;
94 
95  /// Returns the unique id of the layer
96  virtual LayerGuid GetGuid() const = 0;
97 
98  /// Apply a visitor to this layer
99  virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
100 
101  /// Provide a hint for the optimizer as to which backend to prefer for this layer.
102  /// By providing a BackendSelectionHint there is no guarantee the input backend supports that layer.
103  /// If IsLayerSupported() returns false with the backend hint, we default to calling IsLayerSupported()
104  /// on the BackendPreferences vector. Use SetBackendId() if we can guarantee a backend supports that
105  /// layer (IsLayerSupported returns true for a specific backend).
106  virtual void BackendSelectionHint(Optional<BackendId> backend) = 0;
107 
108  /// Returns the armnn::LayerType of this layer
109  virtual LayerType GetType() const = 0;
110 
111  /// If the layer has a descriptor return it.
112  /// The base descriptor can then be cast to the correct descriptor class.
113  /// If the layer has no associated descriptor a struct of type NullDescriptor will be returned.
114  /// Note: NullDescriptors can be detected because they return true when
115  /// the BaseDescriptor IsNull function is invoked.
116  virtual const BaseDescriptor& GetParameters() const = 0;
117 
118  /// Set the backend of the IConnectableLayer.
119  /// By using SetBackendId() we guarantee that the input backend supports that
120  /// layer (IsLayerSupported returns true for a specific backend). If there is
121  /// no guarantee the input backend supports that layer use BackendSelectionHint().
122  virtual void SetBackendId(const BackendId& id) = 0;
123 
124  using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
125 
126  // Returns ConstantTensors of this Layer if it has any, otherwise returns empty vector.
128 
129  using ImmutableConstantTensors = std::vector<std::reference_wrapper<const std::shared_ptr<ConstTensorHandle>>>;
130 
131  // Returns ConstantTensors of this Layer if it has any, otherwise returns empty vector.
133 
134 protected:
135  /// Objects are not deletable via the handle
137 };
138 
140 {
141  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
143  : m_ReduceFp32ToFp16(false)
144  , m_Debug(false)
145  , m_DebugToFile(false)
146  , m_ReduceFp32ToBf16(false)
148  , m_ImportEnabled(false)
149  , m_ModelOptions()
150  , m_ProfilingEnabled(false)
151  , m_ExportEnabled(false)
152  , m_AllowExpandedDims(false)
153  {}
154 
155  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
156  OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
157  ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
158  : m_ReduceFp32ToFp16(reduceFp32ToFp16)
159  , m_Debug(debug)
160  , m_DebugToFile(debugToFile)
161  , m_ReduceFp32ToBf16(reduceFp32ToBf16)
163  , m_ImportEnabled(importEnabled)
164  , m_ModelOptions(modelOptions)
165  , m_ProfilingEnabled(false)
166  , m_ExportEnabled(exportEnabled)
167  , m_AllowExpandedDims(false)
168  {
169  }
170 
171  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
172  OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
173  ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
174  bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
175  bool debugToFile = false, bool allowExpandedDims = false)
176  : m_ReduceFp32ToFp16(reduceFp32ToFp16)
177  , m_Debug(debug)
178  , m_DebugToFile(debugToFile)
179  , m_ReduceFp32ToBf16(reduceFp32ToBf16)
180  , m_shapeInferenceMethod(shapeInferenceMethod)
181  , m_ImportEnabled(importEnabled)
182  , m_ModelOptions(modelOptions)
183  , m_ProfilingEnabled(false)
184  , m_ExportEnabled(exportEnabled)
185  , m_AllowExpandedDims(allowExpandedDims)
186  {
187  }
188 
189  const std::string ToString() const
190  {
191  std::stringstream stream;
192  stream << "OptimizerOptions: \n";
193  stream << "\tReduceFp32ToFp16: " << m_ReduceFp32ToFp16 << "\n";
194  stream << "\tReduceFp32ToBf16: " << m_ReduceFp32ToBf16 << "\n";
195  stream << "\tDebug: " << m_Debug << "\n";
196  stream << "\tDebug to file: " << m_DebugToFile << "\n";
197  stream << "\tShapeInferenceMethod: " <<
199  ? "ValidateOnly" : "InferAndValidate") << "\n";
200  stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
201  stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
202  stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
203  stream << "\tAllowExpandedDims: " << m_AllowExpandedDims << "\n";
204 
205  stream << "\tModelOptions: \n";
206  for (auto optionsGroup : m_ModelOptions)
207  {
208  for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
209  {
210  const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
211  stream << "\t\tBackend: " << optionsGroup.GetBackendId() << "\n"
212  << "\t\t\tOption: " << option.GetName() << "\n"
213  << "\t\t\tValue: " << std::string(option.GetValue().ToString()) << "\n";
214  }
215  }
216 
217  return stream.str();
218  }
219 
220  /// Reduces all Fp32 operators in the model to Fp16 for faster processing.
221  /// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
222  /// between layers that weren't in Fp32 in the first place or if the operator is not supported in Fp16.
223  /// The overhead of these conversions can lead to a slower overall performance if too many conversions are
224  /// required.
226 
227  /// Add debug data for easier troubleshooting
228  bool m_Debug;
229 
230  /// Pass debug data to separate output files for easier troubleshooting
232 
233  /// @Note This feature has been replaced by enabling Fast Math in compute library backend options.
234  /// This is currently a placeholder option
236 
237  /// Infer output size when not available
239 
240  /// Enable Import
242 
243  /// Enable Model Options
245 
246  /// Enable profiling dump of the optimizer phase
248 
249  /// Enable Export
251 
252  /// When calculating tensor sizes, dimensions of size == 1 will be ignored
254 };
255 
256 /// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
257 /// a set of features that allows the user to customize this optimization on a per model basis.
259 
261 {
262 public:
266 
267  OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct);
268 
270 
271  OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
272  ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false);
273 
274  OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
276  bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
277  bool debugToFile = false, bool allowExpandedDims = false);
278 
279  const std::string ToString() const;
280 
281  bool GetProfilingEnabled() const;
282 
283  bool GetImportEnabled() const;
284 
285  bool GetExportEnabled() const;
286 
287  bool GetReduceFp32ToFp16() const;
288 
289  bool GetReduceFp32ToBf16() const;
290 
291  bool GetDebugEnabled() const;
292 
293  bool GetDebugToFileEnabled() const;
294 
295  bool GetAllowExpandedDims() const;
296 
298 
300 
301  void SetImportEnabled(bool ImportState);
302 
303  void SetExportEnabled(bool ExportState);
304 
305  void SetProfilingEnabled(bool ProfilingState);
306 
307  void SetDebugEnabled(bool DebugState);
308 
309  void SetDebugToFileEnabled(bool DebugFileState);
310 
311  void SetReduceFp32ToFp16(bool ReduceFp32ToFp16State);
312 
313  void SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType);
314 
316 
317  void SetAllowExpandedDims(bool ExpandedDimsAllowed);
318 
319 private:
320 
321  std::unique_ptr<armnn::OptimizerOptionsOpaqueImpl> p_OptimizerOptionsImpl;
322 
323 };
324 
325 class IWorkloadFactory;
326 class NetworkImpl;
327 using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
328 using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>;
329 
330 using CompiledBlobDeleter = std::function<void(const void*)>;
331 using CompiledBlobPtr = std::unique_ptr<void, CompiledBlobDeleter>;
332 
333 /// Main network class which provides the interface for building up a neural network.
334 /// This object is subsequently required by the IRuntime::Load() method.
335 class INetwork
336 {
337 public:
338  static INetwork* CreateRaw(const NetworkOptions& networkOptions = {});
339  static INetworkPtr Create(const NetworkOptions& networkOptions = {});
340  static void Destroy(INetwork* network);
341 
342  Status PrintGraph();
343 
344  /// Adds an input layer to the network.
345  /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified.
346  /// when passing the inputs to the IRuntime::EnqueueWorkload() function.
347  /// @param name - Optional name for the layer.
348  /// @return - Interface for configuring the layer.
349  IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);
350 
351  /// Adds an ArgMinMax layer to the network.
352  /// @param desc - Parameters for the L2 normalization operation.
353  /// @param name - Optional name for the layer.
354  /// @return - Interface for configuring the layer.
356  const char* name = nullptr);
357 
358  /// Adds a cast layer to the network.
359  /// @param name - Optional name for the layer.
360  /// @return - Interface for configuring the layer.
361  IConnectableLayer* AddCastLayer(const char* name = nullptr);
362 
363  /// Add a Comparison layer to the network.
364  /// @param name - Optional name for the layer.
365  /// @param desc - Descriptor for the comparison operation.
366  /// @return - Interface for configuring the layer.
367  IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
368  const char* name = nullptr);
369 
370  /// Adds a concatenation layer to the network.
371  /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
372  /// process. Number of Views must be equal to the number of inputs, and their order
373  /// must match - e.g. first view corresponds to the first input, second view to the
374  /// second input, etc....
375  /// @param name - Optional name for the layer.
376  /// @return - Interface for configuring the layer.
377  IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
378  const char* name = nullptr);
379 
380  /// Adds a 2D convolution layer to the network.
381  /// @param convolution2dDescriptor - Description of the 2D convolution layer.
382  /// @param name - Optional name for the layer.
383  /// @return - Interface for configuring the layer.
384  IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
385  const char* name = nullptr);
386 
387  /// Adds a 3D convolution layer to the network.
388  /// @param convolution3dDescriptor - Description of the 3D convolution layer.
389  /// @param name - Optional name for the layer.
390  /// @return - Interface for configuring the layer.
391  IConnectableLayer* AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
392  const char* name = nullptr);
393 
394  /// Adds a depth to space layer to the network.
395  /// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
396  /// @param name - Optional name for the layer.
397  /// @return - Interface for configuring the layer.
398  IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
399  const char* name = nullptr);
400 
401  /// Adds a 2D depthwise convolution layer to the network.
402  /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
403  /// @param name - Optional name for the layer.
404  /// @return - Interface for configuring the layer.
406  const char* name = nullptr);
407 
408  /// Adds a Dequantize layer to the network.
409  /// @return - Interface for configuring the layer.
410  IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
411 
412  /// Adds a Detection PostProcess layer to the network.
413  /// @param descriptor - Description of the Detection PostProcess layer.
414  /// @param anchors - Tensor for anchors.
415  /// @param name - Optional name for the layer.
416  /// @return - Interface for configuring the layer.
418  const DetectionPostProcessDescriptor& descriptor,
419  const ConstTensor& anchors,
420  const char* name = nullptr);
421 
422  /// Add an ElementwiseBinary layer to the network.
423  /// @param name - Optional name for the layer.
424  /// @param desc - Descriptor for the elementwiseBinary operations.
425  /// @return - Interface for configuring the layer.
427  const char* name = nullptr);
428 
429  /// Add an ElementwiseUnary layer to the network.
430  /// @param name - Optional name for the layer.
431  /// @param desc - Descriptor for the elementwiseUnary operations.
432  /// @return - Interface for configuring the layer.
433  IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
434  const char* name = nullptr);
435 
436  /// Add an Fill layer to the network.
437  /// @param name - Optional name for the layer.
438  /// @param fillDescriptor - Descriptor for the fill operation.
439  /// @return - Interface for configuring the layer.
440  IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
441  const char* name = nullptr);
442 
443 
444  /// Adds a fully connected layer to the network.
445  /// @param fullyConnectedDescriptor - Description of the fully connected layer.
446  /// @return - Interface for configuring the layer.
447  ///
448  /// @note Weights and biases are passed in as inputs. If they are constant tensors you can simply store
449  /// them in a ConstantLayer as seen below. A full example can be found in samples/SimpleSample.cpp.
450  ///
451  /// @code
452  /// // Make sure the IsConstant flag is set on the weightsInfo before passing it to the ConstTensor.
453  /// ConstTensor weights(weightsInfo, weightsData);
454  ///
455  /// // Constant layer that now holds weights data for FullyConnected
456  /// IConnectableLayer* const constantWeightsLayer = myNetwork->AddConstantLayer(weights, "weights");
457  ///
458  /// FullyConnectedDescriptor fullyConnectedDesc;
459  /// IConnectableLayer* const fullyConnectedLayer = myNetwork->AddFullyConnectedLayer(fullyConnectedDesc,
460  /// "fully connected");
461  /// IConnectableLayer* InputLayer = myNetwork->AddInputLayer(0);
462  /// InputLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
463  /// constantWeightsLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(1));
464  /// @endcode
465  IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
466  const char* name = nullptr);
467 
468  /// Adds a permute layer to the network.
469  /// @param permuteDescriptor - PermuteDescriptor to configure the permute.
470  /// @param name - Optional name for the layer.
471  /// @return - Interface for configuring the layer.
472  IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
473  const char* name = nullptr);
474 
475  /// Adds a batch to space ND layer to the network.
476  /// @param batchToSpaceNdDescriptor - Description of the layer.
477  /// @param name - Optional name for the layer.
478  /// @return - Interface for configuring the layer.
479  IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
480  const char* name = nullptr);
481 
482  /// Adds a 2D pooling layer to the network.
483  /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
484  /// @param name - Optional name for the layer.
485  /// @return - Interface for configuring the layer.
486  IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
487  const char* name = nullptr);
488 
489  /// Adds a 3D pooling layer to the network.
490  /// @param pooling3dDescriptor - Pooling3dDescriptor to configure the pooling.
491  /// @param name - Optional name for the layer.
492  /// @return - Interface for configuring the layer.
493  IConnectableLayer* AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
494  const char* name = nullptr);
495 
496  /// Adds a Precompiled layer to the network.
497  /// Method use is for backend users.
498  /// @param preCompiledDescriptor - PreCompiledDescriptor contains parameters for the Precompiled layer.
499  /// @param compiledBlobPtr - CompiledBlobPtr pre-compiled object set for the Precompiled layer.
500  /// @param backend - optional BackendId set for the Precompiled layer.
501  /// @return - Interface for configuring the layer.
502  IConnectableLayer* AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
503  CompiledBlobPtr compiledBlobPtr,
504  const Optional<BackendId>& backend,
505  const char* name = nullptr);
506 
507  /// Adds an activation layer to the network.
508  /// @param activationDescriptor - ActivationDescriptor to configure the activation.
509  /// @param name - Optional name for the layer.
510  /// @return - Interface for configuring the layer.
511  IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
512  const char* name = nullptr);
513 
514  /// Adds a normalization layer to the network.
515  /// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
516  /// @param name - Optional name for the layer.
517  /// @return - Interface for configuring the layer.
518  IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
519  const char* name = nullptr);
520 
521  /// Adds a slice layer to the network.
522  /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
523  /// @param name - Optional name for the layer.
524  /// @return - Interface for configuring the layer.
525  IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
526 
527  /// Adds a softmax layer to the network.
528  /// If the data type is QAsymm8, then the output quantization parameters
529  /// must have a scale of 1/256 and an offset of 0
530  /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
531  /// @param name - Optional name for the layer.
532  /// @return - Interface for configuring the layer.
533  IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
534  const char* name = nullptr);
535 
536  /// Adds a splitter layer to the network.
537  /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
538  /// Number of Views must be equal to the number of outputs,
539  /// and their order must match - e.g. first view corresponds to
540  /// the first output, second view to the second output, etc....
541  /// @param name - Optional name for the layer.
542  /// @return - Interface for configuring the layer.
543  IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
544  const char* name = nullptr);
545 
546  /// Adds a merge layer to the network.
547  /// @param name - Optional name for the layer.
548  /// @return - Interface for configuring the layer.
549  IConnectableLayer* AddMergeLayer(const char* name = nullptr);
550 
551  /// Adds an addition layer to the network.
552  /// @param name - Optional name for the layer.
553  /// @return - Interface for configuring the layer.
554  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
555  IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
556 
557  /// Adds a multiplication layer to the network.
558  /// @param name - Optional name for the layer.
559  /// @return - Interface for configuring the layer.
560  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
561  IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
562 
563  /// Adds a batch normalization layer to the network.
564  /// @param mean - Pre-calculated mean for each channel.
565  /// @param variance - Pre-calculated variance for each channel.
566  /// @param beta - Per-channel additive factor.
567  /// @param gamma - Per-channel multiplicative factor.
568  /// @return - Interface for configuring the layer.
569  /// @param name - Optional name for the layer.
571  const ConstTensor& mean,
572  const ConstTensor& variance,
573  const ConstTensor& beta,
574  const ConstTensor& gamma,
575  const char* name = nullptr);
576 
577  /// Adds a rank layer to the network.
578  /// @param name - Optional name for the layer.
579  /// @return - Interface for configuring the layer.
580  IConnectableLayer* AddRankLayer(const char* name = nullptr);
581 
582  /// Adds a resize layer to the network.
583  /// @param resizeDescriptor - Parameters for the resize operation.
584  /// @param name - Optional name for the layer.
585  /// @return - Interface for configuring the layer.
586  IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
587  const char* name = nullptr);
588 
589  /// Adds a reduce layer to the network.
590  /// @param ReduceDescriptor - Parameters for the reduce operation.
591  /// @param name - Optional name for the layer.
592  /// @return - Interface for configuring the layer.
593  IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
594  const char* name = nullptr);
595 
596  /// Adds an instance normalization layer to the network.
597  /// @param desc - Parameters for the instance normalization operation.
598  /// @param name - Optional name for the layer.
599  /// @return - Interface for configuring the layer.
601  const char* name = nullptr);
602 
603  /// Adds an L2 normalization layer to the network.
604  /// Normalization is performed along dimension 1, but requires a 4d input.
605  /// @param desc - Parameters for the L2 normalization operation.
606  /// @param name - Optional name for the layer.
607  /// @return - Interface for configuring the layer.
609  const char* name = nullptr);
610 
611  /// Adds a log softmax layer to the network.
612  /// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
613  /// @param name - Optional name for the layer.
614  /// @return - Interface for configuring the layer.
615  IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
616  const char* name = nullptr);
617 
618  /// Adds a layer with no inputs and a single output, which always corresponds to
619  /// the passed in constant tensor.
620  /// @param input - Tensor to be provided as the only output of the layer. The layer will maintain
621  /// its own copy of the tensor data, meaning the memory referenced by @a input can
622  /// be freed or reused after this function is called.
623  /// @param name - Optional name for the layer.
624  /// @return - Interface for configuring the layer.
626  const char* name = nullptr);
627 
628  /// Adds a reshape layer to the network.
629  /// @param reshapeDescriptor - Parameters for the reshape operation.
630  /// @param name - Optional name for the layer.
631  /// @return - Interface for configuring the layer.
632  IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
633  const char* name = nullptr);
634 
635  /// Adds a shape layer to the network.
636  /// @param name - Optional name for the layer.
637  /// @return - Interface for configuring the layer.
638  IConnectableLayer* AddShapeLayer(const char* name = nullptr);
639 
640  /// Adds a space to batch layer to the network.
641  /// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
642  /// @param name - Optional name for the layer.
643  /// @return - Interface for configuring the layer.
644  IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
645  const char* name = nullptr);
646 
647  /// Adds a space to depth layer to the network.
648  /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
649  /// @param name - Optional name for the layer.
650  /// @return - Interface for configuring the layer.
651  IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
652  const char* name = nullptr);
653 
654  /// Adds a floor layer to the network.
655  /// @param name - Optional name for the layer.
656  /// @return - Interface for configuring the layer.
657  IConnectableLayer* AddFloorLayer(const char* name = nullptr);
658 
659  /// Adds an output layer to the network.
660  /// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
661  /// when passing the outputs to the IRuntime::EnqueueWorkload() function.
662  /// @param name - Optional name for the layer.
663  /// @return - Interface for configuring the layer.
664  IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr);
665 
666  /// Add a Lstm layer to the network
667  /// @param descriptor - Parameters for the Lstm operation
668  /// @param params - Weights and biases for the LSTM cell
669  /// @param name - Optional name for the layer
670  /// @return - Interface for configuring the layer.
671  IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
672  const LstmInputParams& params,
673  const char* name = nullptr);
674 
675  /// Adds a division layer to the network.
676  /// @param name - Optional name for the layer.
677  /// @return - Interface for configuring the layer.
678  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
679  IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
680 
681  /// Adds a subtraction layer to the network.
682  /// @param name - Optional name for the layer.
683  /// @return - Interface for configuring the layer.
684  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
685  IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
686 
687  /// Add a Maximum layer to the network.
688  /// @param name - Optional name for the layer.
689  /// @return - Interface for configuring the layer.
690  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
691  IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
692 
693  /// Add a Mean layer to the network.
694  /// @param meanDescriptor - Parameters for the mean operation.
695  /// @param name - Optional name for the layer.
696  /// @return - Interface for configuring the layer.
697  IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
698 
699  /// Adds a fully pad layer to the network.
700  /// @param paddings - n by 2 tensor, where n is the rank of the input tensor,
701  /// such that paddings[i,0] indicates the amount of padding to add in front of dimonsion i, and
702  /// paddings[i,1] indicates the amount of padding to add after the end of dimension i
703  /// @param name - Optional name for the layer.
704  /// @return - Interface for configuring the layer.
705  IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
706  const char* name = nullptr);
707 
708  /// Add a quantize layer to the network
709  ///@param name - Optional name for the layer.
710  /// @return - Interface for configuring the layer.
711  IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
712 
713  /// Adds a strided slice layer to the network.
714  /// @param StridedSliceDescriptor - Parameters for the strided slice operation.
715  /// @param name - Optional name for the layer.
716  /// @return - Interface for configuring the layer.
717  IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
718  const char* name = nullptr);
719 
720  /// Add a Minimum layer to the network.
721  /// @param name - Optional name for the layer.
722  /// @return - Interface for configuring the layer.
723  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
724  IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
725 
726  /// Add Gather layer to the network.
727  /// @param descriptor - Description of the gather layer.
728  /// @param name - Optional name for the layer.
729  /// @return - Interface for configuring the layer.
731  const char* name = nullptr);
732 
733  /// Add GatherNd layer to the network.
734  /// @param name - Optional name for the layer.
735  /// @return - Interface for configuring the layer.
736  IConnectableLayer* AddGatherNdLayer(const char* name = nullptr);
737 
738  /// Adds a switch layer to the network.
739  /// @param name - Optional name for the layer.
740  /// @return - Interface for configuring the layer.
741  IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
742 
743  /// Adds a PReLU layer to the network.
744  /// @param name - Optional name for the layer.
745  /// @return - Interface for configuring the layer.
746  IConnectableLayer* AddPreluLayer(const char* name = nullptr);
747 
748  /// Adds a 2D transpose convolution layer to the network.
749  /// @param descriptor - Description of the 2D transpose convolution layer.
750  /// @param weights - Tensor for the weights data.
751  /// @param biases - Optional tensor for the bias data.
752  /// @param name - Optional name for the layer.
753  /// @return - Interface for configuring the layer.
755  const ConstTensor& weights,
756  const Optional<ConstTensor>& biases,
757  const char* name = nullptr);
758 
759  /// Adds a transpose layer to the network.
760  /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
761  /// @param name - Optional name for the layer.
762  /// @return - Interface for configuring the layer.
763  IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
764  const char* name = nullptr);
765 
766  /// Adds a stack layer to the network.
767  /// @param descriptor - Description of the stack layer.
768  /// @param name - Optional name for the layer.
769  /// @return - Interface for configuring the layer.
771  const char* name = nullptr);
772 
773  /// Add a stand-in layer for a type unknown to the Arm NN framework.
774  /// Note: Due to the nature of this layer, no validation can be performed by the framework.
 775  /// Furthermore, any model containing this layer cannot make use of dynamic tensors since the
776  /// tensor sizes cannot be inferred.
 777  /// @param descriptor - Descriptor for the StandIn layer.
778  /// @return - Interface for configuring the layer.
780  const char* name = nullptr);
781 
782  /// Add a QuantizedLstm layer to the network
783  /// @param params - The weights and biases for the Quantized LSTM cell
784  /// @param name - Optional name for the layer
785  /// @return - Interface for configuring the layer.
787  const char* name = nullptr);
788 
789  /// Add a QLstm layer to the network
790  /// @param descriptor - Parameters for the QLstm operation
791  /// @param params - Weights and biases for the layer
792  /// @param name - Optional name for the layer
793  /// @return - Interface for configuring the layer.
795  const LstmInputParams& params,
796  const char* name = nullptr);
797 
798  /// Adds a Logical Binary layer to the network.
799  /// @param descriptor - Description of the Logical Binary layer.
800  /// @param name - Optional name for the layer.
801  /// @return - Interface for configuring the layer.
803  const char* name = nullptr);
804 
805  /// Add a UnidirectionalSequenceLstm layer to the network
806  /// @param descriptor - Parameters for the UnidirectionalSequenceLstm operation
807  /// @param params - Weights and biases for the UnidirectionalSequenceLstm
808  /// @param name - Optional name for the layer
809  /// @return - Interface for configuring the layer.
811  const LstmInputParams& params,
812  const char* name = nullptr);
813 
814  /// Add a ChannelShuffle layer to the network
815  /// @param descriptor - Parameters for the ChannelShuffle operation
816  /// @param name - Optional name for the layer
817  /// @return - Interface for configuring the layer
819  const char* name = nullptr);
820 
821  /// Add a BatchMatMul layer to the network
822  /// @param descriptor - Parameters for the BatchMatMul operation
823  /// @param name - Optional name for the layer
824  /// @return - Interface for configuring the layer
826  const char* name = nullptr);
827 
828  void ExecuteStrategy(IStrategy& strategy) const;
829 
830 protected:
831  ~INetwork();
832 
833  friend void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy);
835  friend TensorInfo GetInputTensorInfo(const INetwork* network);
836  friend IOptimizedNetworkPtr Optimize(const INetwork& network,
837  const std::vector<BackendId>& backendPreferences,
838  const IDeviceSpec& deviceSpec,
839  const OptimizerOptions& options,
840  Optional<std::vector<std::string>&> messages);
841  friend IOptimizedNetworkPtr Optimize(const INetwork& network,
842  const std::vector<BackendId>& backendPreferences,
843  const IDeviceSpec& deviceSpec,
844  const OptimizerOptionsOpaque& options,
845  Optional<std::vector<std::string>&> messages);
846 
847  INetwork(NetworkOptions networkOptions = {});
848 
849  std::unique_ptr<NetworkImpl> pNetworkImpl;
850 };
851 
852 namespace experimental
853 {
854 class AsyncNetworkImpl;
855 class WorkingMemHandle;
856 }
857 
858 struct BackendSettings;
859 struct OptimizationResult;
860 class OptimizedNetworkImpl;
861 class IProfiler;
863 {
864 public:
865  static void Destroy(IOptimizedNetwork* network);
866 
867  Status PrintGraph();
868  Status SerializeToDot(std::ostream& stream) const;
869 
870  arm::pipe::ProfilingGuid GetGuid() const;
871 
872  size_t GetNumInputs() const;
873  size_t GetNumOutputs() const;
874 
875  void ExecuteStrategy(IStrategy& strategy) const;
876 
877  /// Creates a copy of the IOptimizedNetwork. The IOptimizedNetwork will not be reoptimized,
878  /// the provided ModelOptions will only be used when creating a LoadedNetwork.
879  IOptimizedNetwork(const IOptimizedNetwork& other, const ModelOptions& modelOptions);
880  IOptimizedNetwork(std::unique_ptr<Graph> graph);
881  IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl);
883 
884  const std::shared_ptr<IProfiler>& GetProfiler() const;
885 
886 protected:
887  friend class LoadedNetwork;
888 
891 
892  friend Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
894  friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
895  const std::vector<BackendId>& backendPreferences,
896  const IDeviceSpec& deviceSpec,
897  const OptimizerOptionsOpaque& options,
898  Optional<std::vector<std::string>&> messages);
899  friend IOptimizedNetworkPtr Optimize(const Graph& inGraph,
900  const std::vector<BackendId>& backendPreferences,
901  const IDeviceSpec& deviceSpec,
902  const OptimizerOptionsOpaque& options,
903  Optional<std::vector<std::string>&> messages);
904 
905  IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
906 
907  std::unique_ptr<OptimizedNetworkImpl> pOptimizedNetworkImpl;
908 };
909 
910 /// Create an optimized version of the network
911 /// @param network INetwork description of the network to be optimized.
912 /// @param backendPreferences The choice of the backend ordered by user preferences.
913 /// @param deviceSpec DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec()
914 /// @param options OptimizerOptions object with optimizer configuration options
915 /// @param messages If there are failures or warnings a string describing same will be added to the vector
916 /// @return An IOptimizedNetworkPtr interface to the optimized network, throws an exception derived from
917 /// armnn::Exception if process fails.
918 
919 IOptimizedNetworkPtr Optimize(const INetwork& network,
920  const std::vector<BackendId>& backendPreferences,
921  const IDeviceSpec& deviceSpec,
923  Optional<std::vector<std::string>&> messages = EmptyOptional());
924 
925 /// Create an optimized version of the network
926 /// @param inGraph Graph to be optimized.
927 /// @param backendPreferences The choice of the backend ordered by user preferences.
928 /// @param deviceSpec DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec()
929 /// @param options OptimizerOptions object with optimizer configuration options
930 /// @param messages If there are failures or warnings a string describing same will be added to the vector
931 /// @return An IOptimizedNetworkPtr interface to the optimized network, throws an exception derived from
932 /// armnn::Exception if process fails.
933 
934 IOptimizedNetworkPtr Optimize(const Graph& inGraph,
935  const std::vector<BackendId>& backendPreferences,
936  const IDeviceSpec& deviceSpec,
937  const OptimizerOptionsOpaque& options,
938  Optional<std::vector<std::string>&> messages = EmptyOptional());
939 
940 /// Accept legacy OptimizerOptions
941 IOptimizedNetworkPtr Optimize(const Graph& inGraph,
942  const std::vector<BackendId>& backendPreferences,
943  const IDeviceSpec& deviceSpec,
944  const OptimizerOptions& options,
945  Optional<std::vector<std::string>&> messages = EmptyOptional());
946 
947 /// Accept legacy OptimizerOptions
948 IOptimizedNetworkPtr Optimize(const INetwork& network,
949  const std::vector<BackendId>& backendPreferences,
950  const IDeviceSpec& deviceSpec,
951  const OptimizerOptions& options,
952  Optional<std::vector<std::string>&> messages = EmptyOptional());
953 
954 } //namespace armnn
armnn::IConnectableLayer::ExecuteStrategy
virtual void ExecuteStrategy(IStrategy &strategy) const =0
Apply a visitor to this layer.
armnn::INetwork::AddConvolution2dLayer
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D convolution layer to the network.
Definition: Network.cpp:271
armnn::OptimizerOptionsOpaque
Definition: INetwork.hpp:260
armnn::INetwork::AddDepthwiseConvolution2dLayer
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D depthwise convolution layer to the network.
Definition: Network.cpp:291
armnn::IOptimizedNetwork::GetGraphForTesting
friend Graph & GetGraphForTesting(IOptimizedNetwork *optNetPtr)
Definition: TestUtils.cpp:49
armnn::BackendId
Definition: BackendId.hpp:75
armnn::OptimizerOptionsOpaque::GetDebugEnabled
bool GetDebugEnabled() const
Definition: Network.cpp:180
armnn::IOutputSlot::GetOwningIConnectableLayer
virtual const IConnectableLayer & GetOwningIConnectableLayer() const =0
armnn::IOptimizedNetwork::GetProfiler
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition: Network.cpp:691
armnn::BackendOptions::BackendOption::GetValue
Var GetValue() const
Definition: BackendOptions.hpp:252
armnn::IConnectableLayer::InferOutputShapes
virtual std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const =0
Infer the shape of the output(s) based on the provided input shape(s)
armnn::INetwork::AddFloorLayer
IConnectableLayer * AddFloorLayer(const char *name=nullptr)
Adds a floor layer to the network.
Definition: Network.cpp:485
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:932
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:757
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1457
armnn::OptimizerOptionsOpaque::GetDebugToFileEnabled
bool GetDebugToFileEnabled() const
Definition: Network.cpp:185
armnn::INetwork::AddConstantLayer
IConnectableLayer * AddConstantLayer(const ConstTensor &input, const char *name=nullptr)
Adds a layer with no inputs and a single output, which always corresponds to the passed in constant t...
Definition: Network.cpp:461
armnn::INetwork::AddTransposeConvolution2dLayer
IConnectableLayer * AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor &descriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)
Adds a 2D transpose convolution layer to the network.
Definition: Network.cpp:572
armnn::IOptimizedNetworkPtr
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:328
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1163
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::INetwork::pNetworkImpl
std::unique_ptr< NetworkImpl > pNetworkImpl
Definition: INetwork.hpp:849
armnn::IOptimizedNetwork::Optimize
friend IOptimizedNetworkPtr Optimize(const INetwork &inNetwork, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptionsOpaque &options, Optional< std::vector< std::string > & > messages)
Create an optimized version of the network.
Definition: Network.cpp:2003
armnn::OptimizerOptions::m_AllowExpandedDims
bool m_AllowExpandedDims
When calculating tensor sizes, dimensions of size == 1 will be ignored.
Definition: INetwork.hpp:253
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1218
armnn::INetwork::AddPooling3dLayer
IConnectableLayer * AddPooling3dLayer(const Pooling3dDescriptor &pooling3dDescriptor, const char *name=nullptr)
Adds a 3D pooling layer to the network.
Definition: Network.cpp:355
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1195
armnn::INetwork::Optimize
friend IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options, Optional< std::vector< std::string > & > messages)
Accept legacy OptimizerOptions.
Definition: Network.cpp:1990
NetworkFwd.hpp
armnn::INetwork::AddDivisionLayer
IConnectableLayer * AddDivisionLayer(const char *name=nullptr)
Adds a division layer to the network.
Definition: Network.cpp:501
armnn::IOptimizedNetwork::SerializeToDot
Status SerializeToDot(std::ostream &stream) const
Definition: Network.cpp:686
armnn::OptimizerOptions::m_ExportEnabled
bool m_ExportEnabled
Enable Export.
Definition: INetwork.hpp:250
armnn::OptimizerOptions::m_ReduceFp32ToFp16
bool m_ReduceFp32ToFp16
Reduces all Fp32 operators in the model to Fp16 for faster processing.
Definition: INetwork.hpp:225
armnn::INetwork::CreateRaw
static INetwork * CreateRaw(const NetworkOptions &networkOptions={})
Definition: Network.cpp:647
armnn::INetwork::GetInputTensorInfo
friend TensorInfo GetInputTensorInfo(const INetwork *network)
armnn::IOutputSlot::~IOutputSlot
~IOutputSlot()
Not user deletable.
Definition: INetwork.hpp:64
armnn::INetwork::AddQLstmLayer
IConnectableLayer * AddQLstmLayer(const QLstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Add a QLstm layer to the network.
Definition: Network.cpp:609
armnn::INetwork::AddLogSoftmaxLayer
IConnectableLayer * AddLogSoftmaxLayer(const LogSoftmaxDescriptor &logSoftmaxDescriptor, const char *name=nullptr)
Adds a log softmax layer to the network.
Definition: Network.cpp:455
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:301
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
Optional.hpp
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1069
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:495
armnn::OptimizerOptionsOpaque::OptimizerOptionsOpaque
OptimizerOptionsOpaque()
Definition: Network.cpp:48
armnn::OptimizerOptions::m_ModelOptions
ModelOptions m_ModelOptions
Enable Model Options.
Definition: INetwork.hpp:244
armnn::LoadedNetwork
Definition: LoadedNetwork.hpp:42
armnn::IInputSlot::GetOwningIConnectableLayer
virtual const IConnectableLayer & GetOwningIConnectableLayer() const =0
armnn::IOptimizedNetwork
Definition: INetwork.hpp:862
armnn::OptimizerOptionsOpaque::GetShapeInferenceMethod
armnn::ShapeInferenceMethod GetShapeInferenceMethod() const
Definition: Network.cpp:200
armnn::INetwork::AddDetectionPostProcessLayer
IConnectableLayer * AddDetectionPostProcessLayer(const DetectionPostProcessDescriptor &descriptor, const ConstTensor &anchors, const char *name=nullptr)
Adds a Detection PostProcess layer to the network.
Definition: Network.cpp:305
armnn::INetwork::AddSubtractionLayer
IConnectableLayer * AddSubtractionLayer(const char *name=nullptr)
Adds a subtraction layer to the network.
Definition: Network.cpp:508
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1551
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:952
armnn::INetwork::AddSliceLayer
IConnectableLayer * AddSliceLayer(const SliceDescriptor &sliceDescriptor, const char *name=nullptr)
Adds a slice layer to the network.
Definition: Network.cpp:381
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1270
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:419
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1505
armnn::IOptimizedNetwork::AsyncNetworkImpl
friend class experimental::AsyncNetworkImpl
Definition: INetwork.hpp:889
armnn::BackendOptions
Struct for the users to pass backend specific options.
Definition: BackendOptions.hpp:22
armnn::INetwork::TestConnectionPreservation
friend class TestConnectionPreservation
Definition: INetwork.hpp:834
armnn::OptimizerOptionsOpaque::GetAllowExpandedDims
bool GetAllowExpandedDims() const
Definition: Network.cpp:190
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IConnectableLayer::ConstantTensors
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
Definition: INetwork.hpp:124
armnn::StandInDescriptor
A StandInDescriptor for the StandIn layer.
Definition: Descriptors.hpp:1248
armnn::INetwork::AddElementwiseBinaryLayer
IConnectableLayer * AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor &elementwiseUnaryDescriptor, const char *name=nullptr)
Add an ElementwiseBinary layer to the network.
Definition: Network.cpp:313
ARMNN_DEPRECATED_MSG_REMOVAL_DATE
#define ARMNN_DEPRECATED_MSG_REMOVAL_DATE(message, removed_in_release)
Definition: Deprecated.hpp:44
armnn::INetwork::AddQuantizeLayer
IConnectableLayer * AddQuantizeLayer(const char *name=nullptr)
Add a quantize layer to the network.
Definition: Network.cpp:533
IStrategy.hpp
armnn::INetwork::AddMinimumLayer
IConnectableLayer * AddMinimumLayer(const char *name=nullptr)
Add a Minimum layer to the network.
Definition: Network.cpp:544
TensorHandle.hpp
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
TensorFwd.hpp
armnn::PreCompiledDescriptor
A PreCompiledDescriptor for the PreCompiledLayer.
Definition: Descriptors.hpp:1334
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::OptimizerOptionsOpaque::SetShapeInferenceMethod
void SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType)
Definition: Network.cpp:140
armnn::INetwork::AddPooling2dLayer
IConnectableLayer * AddPooling2dLayer(const Pooling2dDescriptor &pooling2dDescriptor, const char *name=nullptr)
Adds a 2D pooling layer to the network.
Definition: Network.cpp:349
armnn::ShapeInferenceMethod
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:232
armnn::OptimizerOptionsOpaque::SetImportEnabled
void SetImportEnabled(bool ImportState)
Definition: Network.cpp:110
armnn::INetwork::AddBatchMatMulLayer
IConnectableLayer * AddBatchMatMulLayer(const BatchMatMulDescriptor &descriptor, const char *name=nullptr)
Add a BatchMatMul layer to the network.
Definition: Network.cpp:636
armnn::IOptimizedNetwork::GetNumOutputs
size_t GetNumOutputs() const
Definition: Network.cpp:706
armnn::Optimize
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptionsOpaque &options=OptimizerOptionsOpaque(), Optional< std::vector< std::string > & > messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:2003
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::OptimizerOptionsOpaque::SetAllowExpandedDims
void SetAllowExpandedDims(bool ExpandedDimsAllowed)
Definition: Network.cpp:145
armnn::INetwork::AddReduceLayer
IConnectableLayer * AddReduceLayer(const ReduceDescriptor &reduceDescriptor, const char *name=nullptr)
Adds a reduce layer to the network.
Definition: Network.cpp:437
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::INetwork::AddDequantizeLayer
IConnectableLayer * AddDequantizeLayer(const char *name=nullptr)
Adds a Dequantize layer to the network.
Definition: Network.cpp:299
armnn::INetwork::Destroy
static void Destroy(INetwork *network)
Definition: Network.cpp:657
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::INetwork::AddMaximumLayer
IConnectableLayer * AddMaximumLayer(const char *name=nullptr)
Add a Maximum layer to the network.
Definition: Network.cpp:515
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::CompiledBlobDeleter
std::function< void(const void *)> CompiledBlobDeleter
Definition: INetwork.hpp:330
armnn::BackendOptions::Var::ToString
std::string ToString()
Definition: BackendOptions.hpp:124
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::OptimizerOptionsOpaque::GetReduceFp32ToFp16
bool GetReduceFp32ToFp16() const
Definition: Network.cpp:170
armnn::INetwork::AddStackLayer
IConnectableLayer * AddStackLayer(const StackDescriptor &descriptor, const char *name=nullptr)
Adds a stack layer to the network.
Definition: Network.cpp:591
armnn::IOutputSlot::Disconnect
virtual void Disconnect(IInputSlot &slot)=0
armnn::INetwork::INetwork
INetwork(NetworkOptions networkOptions={})
Definition: Network.cpp:44
armnn::BackendOptions::BackendOption::GetName
std::string GetName() const
Definition: BackendOptions.hpp:251
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:863
armnn::INetwork::AddPrecompiledLayer
IConnectableLayer * AddPrecompiledLayer(const PreCompiledDescriptor &preCompiledDescriptor, CompiledBlobPtr compiledBlobPtr, const Optional< BackendId > &backend, const char *name=nullptr)
Adds a Precompiled layer to the network.
Definition: Network.cpp:361
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1042
armnn::INetwork::AddMultiplicationLayer
IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr)
Adds a multiplication layer to the network.
Definition: Network.cpp:409
armnn::OptimizerOptionsOpaque::~OptimizerOptionsOpaque
~OptimizerOptionsOpaque()
armnn::NetworkOptions
std::vector< BackendOptions > NetworkOptions
Definition: BackendOptions.hpp:16
armnn::INetwork::AddNormalizationLayer
IConnectableLayer * AddNormalizationLayer(const NormalizationDescriptor &normalizationDescriptor, const char *name=nullptr)
Adds a normalization layer to the network.
Definition: Network.cpp:375
armnn::OptimizerOptions::m_ProfilingEnabled
bool m_ProfilingEnabled
Enable profiling dump of the optimizer phase.
Definition: INetwork.hpp:247
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:701
armnn::INetwork::AddSpaceToBatchNdLayer
IConnectableLayer * AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor &spaceToBatchNdDescriptor, const char *name=nullptr)
Adds a space to batch layer to the network.
Definition: Network.cpp:473
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:913
armnn::INetwork::AddConvolution3dLayer
IConnectableLayer * AddConvolution3dLayer(const Convolution3dDescriptor &convolution3dDescriptor, const char *name=nullptr)
Adds a 3D convolution layer to the network.
Definition: Network.cpp:277
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::INetwork::AddL2NormalizationLayer
IConnectableLayer * AddL2NormalizationLayer(const L2NormalizationDescriptor &desc, const char *name=nullptr)
Adds an L2 normalization layer to the network.
Definition: Network.cpp:449
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::OptimizerOptionsOpaque::SetReduceFp32ToFp16
void SetReduceFp32ToFp16(bool ReduceFp32ToFp16State)
Definition: Network.cpp:135
armnn::IOutputSlot::GetNumConnections
virtual unsigned int GetNumConnections() const =0
armnn::INetwork::AddTransposeLayer
IConnectableLayer * AddTransposeLayer(const TransposeDescriptor &transposeDescriptor, const char *name=nullptr)
Adds a transpose layer to the network.
Definition: Network.cpp:580
armnn::INetwork::AddStandInLayer
IConnectableLayer * AddStandInLayer(const StandInDescriptor &descriptor, const char *name=nullptr)
Add a stand-in layer for a type unknown to the Arm NN framework.
Definition: Network.cpp:597
armnn::IOutputSlot::CalculateIndexOnOwner
virtual unsigned int CalculateIndexOnOwner() const =0
armnn::INetwork::AddAdditionLayer
IConnectableLayer * AddAdditionLayer(const char *name=nullptr)
Adds an addition layer to the network.
Definition: Network.cpp:402
armnn::OptimizerOptionsOpaque::GetProfilingEnabled
bool GetProfilingEnabled() const
Definition: Network.cpp:155
armnn::INetwork::AddPadLayer
IConnectableLayer * AddPadLayer(const PadDescriptor &padDescriptor, const char *name=nullptr)
Adds a fully pad layer to the network.
Definition: Network.cpp:527
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:647
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1139
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::INetwork::AddInstanceNormalizationLayer
IConnectableLayer * AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor &desc, const char *name=nullptr)
Adds an instance normalization layer to the network.
Definition: Network.cpp:443
armnn::OptimizerOptions::m_DebugToFile
bool m_DebugToFile
Pass debug data to separate output files for easier troubleshooting.
Definition: INetwork.hpp:231
armnn::OptimizerOptionsOpaque::SetDebugEnabled
void SetDebugEnabled(bool DebugState)
Definition: Network.cpp:125
armnn::IDeviceSpec
Device specific knowledge to be passed to the optimizer.
Definition: Types.hpp:291
armnn::IOptimizedNetwork::GetGuid
arm::pipe::ProfilingGuid GetGuid() const
Definition: Network.cpp:696
armnn::IOptimizedNetwork::Destroy
static void Destroy(IOptimizedNetwork *network)
Definition: Network.cpp:676
armnn::INetwork::AddMergeLayer
IConnectableLayer * AddMergeLayer(const char *name=nullptr)
Adds a merge layer to the network.
Definition: Network.cpp:397
armnn::CompiledBlobPtr
std::unique_ptr< void, CompiledBlobDeleter > CompiledBlobPtr
Definition: INetwork.hpp:331
armnn::IOutputSlot::GetConnection
virtual const IInputSlot * GetConnection(unsigned int index) const =0
armnn::IOptimizedNetwork::pOptimizedNetworkImpl
std::unique_ptr< OptimizedNetworkImpl > pOptimizedNetworkImpl
Definition: INetwork.hpp:907
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:797
armnn::INetwork::AddPermuteLayer
IConnectableLayer * AddPermuteLayer(const PermuteDescriptor &permuteDescriptor, const char *name=nullptr)
Adds a permute layer to the network.
Definition: Network.cpp:337
armnn::IConnectableLayer::GetGuid
virtual LayerGuid GetGuid() const =0
Returns the unique id of the layer.
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1529
armnn::IOptimizedNetwork::PrintGraph
Status PrintGraph()
Definition: Network.cpp:681
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:41
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:576
armnn::OptimizerOptions::ToString
const std::string ToString() const
Definition: INetwork.hpp:189
armnn::IConnectableLayer::~IConnectableLayer
~IConnectableLayer()
Objects are not deletable via the handle.
Definition: INetwork.hpp:136
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:522
armnn::INetwork::AddComparisonLayer
IConnectableLayer * AddComparisonLayer(const ComparisonDescriptor &comparisonDescriptor, const char *name=nullptr)
Add a Comparison layer to the network.
Definition: Network.cpp:257
armnn::IOptimizedNetwork::IOptimizedNetwork
IOptimizedNetwork(const IOptimizedNetwork &other, const ModelOptions &modelOptions)
Creates a copy of the IOptimizedNetwork.
Definition: Network.cpp:662
armnn::INetwork::AddCastLayer
IConnectableLayer * AddCastLayer(const char *name=nullptr)
Adds a cast layer to the network.
Definition: Network.cpp:252
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:816
armnn::OptimizerOptionsOpaque::SetDebugToFileEnabled
void SetDebugToFileEnabled(bool DebugFileState)
Definition: Network.cpp:130
armnn::BackendOptions::BackendOption
Definition: BackendOptions.hpp:215
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1347
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::OptimizerOptions::m_ReduceFp32ToBf16
bool m_ReduceFp32ToBf16
@Note This feature has been replaced by enabling Fast Math in compute library backend options.
Definition: INetwork.hpp:235
armnn::Status
Status
Definition: Types.hpp:42
DescriptorsFwd.hpp
armnn::IOptimizedNetwork::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:2967
armnn::OptimizerOptions::m_shapeInferenceMethod
ShapeInferenceMethod m_shapeInferenceMethod
Infer output size when not available.
Definition: INetwork.hpp:238
armnn::OptimizerOptionsOpaque::operator=
OptimizerOptionsOpaque & operator=(OptimizerOptionsOpaque other)
Definition: Network.cpp:95
armnn::INetwork::AddActivationLayer
IConnectableLayer * AddActivationLayer(const ActivationDescriptor &activationDescriptor, const char *name=nullptr)
Adds an activation layer to the network.
Definition: Network.cpp:369
armnn::IInputSlot::~IInputSlot
~IInputSlot()
Not user deletable.
Definition: INetwork.hpp:36
armnn::INetwork::AddGatherNdLayer
IConnectableLayer * AddGatherNdLayer(const char *name=nullptr)
Add GatherNd layer to the network.
Definition: Network.cpp:557
armnn::QuantizedLstmInputParams
Definition: QuantizedLstmParams.hpp:13
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0
armnn::IOptimizedNetwork::GetNumInputs
size_t GetNumInputs() const
Definition: Network.cpp:701
armnn::OptimizerOptionsOpaque::GetImportEnabled
bool GetImportEnabled() const
Definition: Network.cpp:160
armnn::INetwork::AddChannelShuffleLayer
IConnectableLayer * AddChannelShuffleLayer(const ChannelShuffleDescriptor &descriptor, const char *name=nullptr)
Add a ChannelShuffle layer to the network.
Definition: Network.cpp:630
armnn::INetwork::AddGatherLayer
IConnectableLayer * AddGatherLayer(const GatherDescriptor &descriptor, const char *name=nullptr)
Add Gather layer to the network.
Definition: Network.cpp:551
armnn::BoostLogSeverityMapping::debug
@ debug
armnn::INetwork::Create
static INetworkPtr Create(const NetworkOptions &networkOptions={})
Definition: Network.cpp:652
armnn::IConnectableLayer::SetBackendId
virtual void SetBackendId(const BackendId &id)=0
Set the backend of the IConnectableLayer.
armnn::INetwork::AddDepthToSpaceLayer
IConnectableLayer * AddDepthToSpaceLayer(const DepthToSpaceDescriptor &depthToSpaceDescriptor, const char *name=nullptr)
Adds a depth to space layer to the network.
Definition: Network.cpp:284
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::OptimizerOptionsOpaque::GetExportEnabled
bool GetExportEnabled() const
Definition: Network.cpp:165
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:990
armnn::INetwork::AddPreluLayer
IConnectableLayer * AddPreluLayer(const char *name=nullptr)
Adds a PReLU layer to the network.
Definition: Network.cpp:567
armnn::OptimizerOptionsOpaque::ToString
const std::string ToString() const
Definition: Network.cpp:205
armnn::IOutputSlot::GetOwningLayerGuid
virtual LayerGuid GetOwningLayerGuid() const =0
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1407
armnn::Graph
Definition: Graph.hpp:30
armnn::INetwork::AddSpaceToDepthLayer
IConnectableLayer * AddSpaceToDepthLayer(const SpaceToDepthDescriptor &spaceToDepthDescriptor, const char *name=nullptr)
Adds a space to depth layer to the network.
Definition: Network.cpp:479
armnn::INetwork::AddSwitchLayer
IConnectableLayer * AddSwitchLayer(const char *name=nullptr)
Adds a switch layer to the network.
Definition: Network.cpp:562
armnn::IOptimizedNetwork::~IOptimizedNetwork
~IOptimizedNetwork()
armnn::IInputSlot::GetSlotIndex
virtual unsigned int GetSlotIndex() const =0
armnn::IOutputSlot::SetTensorInfo
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::INetwork::AddMeanLayer
IConnectableLayer * AddMeanLayer(const MeanDescriptor &meanDescriptor, const char *name=nullptr)
Add a Mean layer to the network.
Definition: Network.cpp:522
armnn::INetwork::VisitLayersTopologically
friend void VisitLayersTopologically(const INetwork *inputNetwork, IStrategy &strategy)
armnn::INetwork::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const
Definition: Network.cpp:642
armnn::INetwork::AddShapeLayer
IConnectableLayer * AddShapeLayer(const char *name=nullptr)
Adds a shape layer to the network.
Definition: Network.cpp:586
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:359
armnn::IOptimizedNetwork::GetModelOptionsForTesting
friend ModelOptions & GetModelOptionsForTesting(IOptimizedNetwork *optNetPtr)
Definition: TestUtils.cpp:54
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1485
armnn::INetwork::~INetwork
~INetwork()
armnn::INetwork::AddResizeLayer
IConnectableLayer * AddResizeLayer(const ResizeDescriptor &resizeDescriptor, const char *name=nullptr)
Adds a resize layer to the network.
Definition: Network.cpp:431
armnn::INetwork::AddConcatLayer
IConnectableLayer * AddConcatLayer(const ConcatDescriptor &concatDescriptor, const char *name=nullptr)
Adds a concatenation layer to the network.
Definition: Network.cpp:264
armnn::INetwork::AddBatchToSpaceNdLayer
IConnectableLayer * AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor &batchToSpaceNdDescriptor, const char *name=nullptr)
Adds a batch to space ND layer to the network.
Definition: Network.cpp:343
armnn::INetwork
Main network class which provides the interface for building up a neural network.
Definition: INetwork.hpp:335
armnn::Optional
Definition: Optional.hpp:270
armnn::INetwork::PrintGraph
Status PrintGraph()
Definition: Network.cpp:236
Logging.hpp
armnn::OptimizerOptionsOpaque::AddModelOption
void AddModelOption(armnn::BackendOptions)
Definition: Network.cpp:150
armnn::INetwork::AddFillLayer
IConnectableLayer * AddFillLayer(const FillDescriptor &fillDescriptor, const char *name=nullptr)
Add an Fill layer to the network.
Definition: Network.cpp:325
armnn::OptimizerOptionsOpaque::GetReduceFp32ToBf16
bool GetReduceFp32ToBf16() const
Definition: Network.cpp:175
armnn::OptimizerOptionsOpaqueImpl
Definition: Network.hpp:296
armnn::INetwork::AddQuantizedLstmLayer
IConnectableLayer * AddQuantizedLstmLayer(const QuantizedLstmInputParams &params, const char *name=nullptr)
Add a QuantizedLstm layer to the network.
Definition: Network.cpp:603
armnn::IConnectableLayer::GetName
virtual const char * GetName() const =0
Returns the name of the layer.
armnn::INetwork::AddElementwiseUnaryLayer
IConnectableLayer * AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor &elementwiseUnaryDescriptor, const char *name=nullptr)
Add an ElementwiseUnary layer to the network.
Definition: Network.cpp:319
armnn::INetwork::AddStridedSliceLayer
IConnectableLayer * AddStridedSliceLayer(const StridedSliceDescriptor &stridedSliceDescriptor, const char *name=nullptr)
Adds a strided slice layer to the network.
Definition: Network.cpp:538
armnn::INetwork::AddRankLayer
IConnectableLayer * AddRankLayer(const char *name=nullptr)
Adds a rank layer to the network.
Definition: Network.cpp:426
armnn::INetworkPtr
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:327
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::NetworkImpl
Private implementation of INetwork.
Definition: Network.hpp:32
armnn::IConnectableLayer::GetType
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
armnn::INetwork::AddUnidirectionalSequenceLstmLayer
IConnectableLayer * AddUnidirectionalSequenceLstmLayer(const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Add a UnidirectionalSequenceLstm layer to the network.
Definition: Network.cpp:622
armnn::LstmInputParams
Definition: LstmParams.hpp:13
BackendOptions.hpp
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::experimental::WorkingMemHandle
Definition: WorkingMemHandle.hpp:29
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::IConnectableLayer::ImmutableConstantTensors
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
Definition: INetwork.hpp:129
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:835
armnn::INetwork::AddBatchNormalizationLayer
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)
Adds a batch normalization layer to the network.
Definition: Network.cpp:416
armnn::INetwork::AddInputLayer
IConnectableLayer * AddInputLayer(LayerBindingId id, const char *name=nullptr)
Adds an input layer to the network.
Definition: Network.cpp:241
armnn::INetwork::AddArgMinMaxLayer
IConnectableLayer * AddArgMinMaxLayer(const ArgMinMaxDescriptor &desc, const char *name=nullptr)
Adds an ArgMinMax layer to the network.
Definition: Network.cpp:246
armnn::OptimizerOptions
Definition: INetwork.hpp:139
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnn::INetwork::AddLogicalBinaryLayer
IConnectableLayer * AddLogicalBinaryLayer(const LogicalBinaryDescriptor &descriptor, const char *name=nullptr)
Adds a Logical Binary layer to the network.
Definition: Network.cpp:616
armnn::IConnectableLayer::BackendSelectionHint
virtual void BackendSelectionHint(Optional< BackendId > backend)=0
Provide a hint for the optimizer as to which backend to prefer for this layer.
armnn::INetwork::AddLstmLayer
IConnectableLayer * AddLstmLayer(const LstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Add a Lstm layer to the network.
Definition: Network.cpp:494
armnn::OptimizerOptionsOpaque::SetExportEnabled
void SetExportEnabled(bool ExportState)
Definition: Network.cpp:115
armnn::INetwork::AddSplitterLayer
IConnectableLayer * AddSplitterLayer(const ViewsDescriptor &splitterDescriptor, const char *name=nullptr)
Adds a splitter layer to the network.
Definition: Network.cpp:391
armnn::OptimizerOptions::m_Debug
bool m_Debug
Add debug data for easier troubleshooting.
Definition: INetwork.hpp:228
armnn::INetwork::AddSoftmaxLayer
IConnectableLayer * AddSoftmaxLayer(const SoftmaxDescriptor &softmaxDescriptor, const char *name=nullptr)
Adds a softmax layer to the network.
Definition: Network.cpp:385
armnn::OptimizerOptionsOpaque::SetProfilingEnabled
void SetProfilingEnabled(bool ProfilingState)
Definition: Network.cpp:120
armnn::IConnectableLayer::GetParameters
virtual const BaseDescriptor & GetParameters() const =0
If the layer has a descriptor return it.
armnn::INetwork::AddOutputLayer
IConnectableLayer * AddOutputLayer(LayerBindingId id, const char *name=nullptr)
Adds an output layer to the network.
Definition: Network.cpp:489
armnn::INetwork::AddFullyConnectedLayer
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const char *name=nullptr)
Adds a fully connected layer to the network.
Definition: Network.cpp:331
armnn::INetwork::AddReshapeLayer
IConnectableLayer * AddReshapeLayer(const ReshapeDescriptor &reshapeDescriptor, const char *name=nullptr)
Adds a reshape layer to the network.
Definition: Network.cpp:467
armnn::IConnectableLayer::GetConstantTensorsByRef
virtual ConstantTensors GetConstantTensorsByRef()=0
Deprecated.hpp
armnn::OptimizerOptionsOpaque::GetModelOptions
armnn::ModelOptions GetModelOptions() const
Definition: Network.cpp:195
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1010
armnn::OptimizerOptions::m_ImportEnabled
bool m_ImportEnabled
Enable Import.
Definition: INetwork.hpp:241