From 45c250a5d6e1669d2670282a55b48b3d727e381b Mon Sep 17 00:00:00 2001 From: Nikhil Raj Date: Wed, 10 Nov 2021 15:01:25 +0000 Subject: Remove use guide section from doxygen * This guide has now been moved to the Quick Start section in doxygen Signed-off-by: Nikhil Raj Change-Id: I758915c43f0e9e116f7308482db34d560d7ba0d9 --- docs/04_contributor.dox | 39 +++++++ docs/04_use_guides.dox | 21 ---- docs/05_00_software_components.dox | 46 ++++++++ docs/05_01_parsers.dox | 207 +++++++++++++++++++++++++++++++++ docs/05_02_deserializer_serializer.dox | 184 +++++++++++++++++++++++++++++ docs/05_03_delegate.dox | 178 ++++++++++++++++++++++++++++ docs/05_contributor.dox | 39 ------- docs/06_00_software_tools.dox | 46 -------- docs/06_01_parsers.dox | 207 --------------------------------- docs/06_02_deserializer_serializer.dox | 184 ----------------------------- docs/06_03_delegate.dox | 178 ---------------------------- docs/Doxyfile | 11 +- 12 files changed, 659 insertions(+), 681 deletions(-) create mode 100644 docs/04_contributor.dox delete mode 100644 docs/04_use_guides.dox create mode 100644 docs/05_00_software_components.dox create mode 100644 docs/05_01_parsers.dox create mode 100644 docs/05_02_deserializer_serializer.dox create mode 100644 docs/05_03_delegate.dox delete mode 100644 docs/05_contributor.dox delete mode 100644 docs/06_00_software_tools.dox delete mode 100644 docs/06_01_parsers.dox delete mode 100644 docs/06_02_deserializer_serializer.dox delete mode 100644 docs/06_03_delegate.dox diff --git a/docs/04_contributor.dox b/docs/04_contributor.dox new file mode 100644 index 0000000000..5cbb6c3b8c --- /dev/null +++ b/docs/04_contributor.dox @@ -0,0 +1,39 @@ +/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. +/// +/// SPDX-License-Identifier: MIT +/// + +namespace armnn +{ +/** + +@page contribguides Contribution Guides +@tableofcontents + +This is a collection of guides that should help you contribute code to Arm NN. Before you get started, please +take a look into our /ref md_Contributor_Guide section. + + - @subpage md_src_backends_README \n + This guide explains how to add your own backend to Arm NN. This might be useful if you would like to accelerate neural + networks on hardware that Arm NN currently doesn't support. + + + - @subpage md_src_dynamic_README \n + Arm NN allows you to load a backend dynamically on runtime. To find out how that can be done take a look at this guide. + +**/ +} + + +/// Create pages for each tool so they appear nicely in the doxygen tree-view. Subpages are not listed there. +/// Also we can overwrite the page name this way. +namespace armnn +{ +/** + +@page md_src_backends_README Backend Developer Guide + +@page md_src_dynamic_README Dynamically loadable Backend + +**/ +} diff --git a/docs/04_use_guides.dox b/docs/04_use_guides.dox deleted file mode 100644 index 1ecef4a215..0000000000 --- a/docs/04_use_guides.dox +++ /dev/null @@ -1,21 +0,0 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. -/// -/// SPDX-License-Identifier: MIT -/// - -namespace armnn -{ -/** -@page useguides Integration Guides -@tableofcontents - -This page links all guides that explain how to use Arm NN tools and how to integrate them into your own project. 
-Some of these guides may not be hosted in our repository and will lead you to guides on -https://developer.arm.com/solutions/machine-learning-on-arm/developer-material/how-to-guides - - -## TfLite delegate guides - - @subpage md_delegate_IntegrateDelegateIntoPython - -**/ -} \ No newline at end of file diff --git a/docs/05_00_software_components.dox b/docs/05_00_software_components.dox new file mode 100644 index 0000000000..2d65daed1e --- /dev/null +++ b/docs/05_00_software_components.dox @@ -0,0 +1,46 @@ +/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. +/// +/// SPDX-License-Identifier: MIT +/// + +namespace armnn +{ +/** +@page swtools Software Components + +On this page you can find all software components contained in the Arm NN repository. You will find links to how-to guides and +other helpful information in each section. + + - @subpage delegate + - @subpage parsers + - @subpage md_python_pyarmnn_README + - @subpage serializer + - @subpage deserializer + - @subpage md_src_armnnConverter_README + - @subpage md_tests_ImageCSVFileGenerator_README + - @subpage md_tests_ImageTensorGenerator_README + - @subpage md_tests_ModelAccuracyTool-Armnn_README +**/ +} + + +/// Create pages for each tool so they appear nicely in the doxygen tree-view. Subpages are not listed there. +/// +/// Note: The parser, serializer and deserializer pages are created in 01_parsers.dox or 02_deserializer_serializer.dox +namespace armnn +{ +/** + +@page md_python_pyarmnn_README PyArmNN + +@page md_src_armnnConverter_README Converter + +@page md_tests_ModelAccuracyTool-Armnn_README ModelAccuracyTool + +@page md_tests_ImageCSVFileGenerator_README ImageCSVFileGenerator + +@page md_tests_ImageTensorGenerator_README ImageTensorGenerator + +**/ +} + diff --git a/docs/05_01_parsers.dox b/docs/05_01_parsers.dox new file mode 100644 index 0000000000..e7124ced94 --- /dev/null +++ b/docs/05_01_parsers.dox @@ -0,0 +1,207 @@ +/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. +/// +/// SPDX-License-Identifier: MIT +/// + +namespace armnn +{ +/** +@page parsers Parsers + +@tableofcontents +Execute models from different machine learning platforms efficiently with our parsers. Simply choose a parser according +to the model you want to run e.g. If you've got a model in onnx format (.onnx) use our onnx-parser. + +If you would like to run a Tensorflow Lite (TfLite) model you probably also want to take a look at our @ref delegate. + +All parsers are written in C++ but it is also possible to use them in python. For more information on our python +bindings take a look into the @ref md_python_pyarmnn_README section. + +
+ + + + +@section S5_onnx_parser Arm NN Onnx Parser + +`armnnOnnxParser` is a library for loading neural networks defined in ONNX protobuf files into the Arm NN runtime. + +## ONNX operators that the Arm NN SDK supports + +This reference guide provides a list of ONNX operators the Arm NN SDK currently supports. + +The Arm NN SDK ONNX parser currently only supports fp32 operators. + +### Fully supported + +- Add + - See the ONNX [Add documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Add) for more information + +- AveragePool + - See the ONNX [AveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#AveragePool) for more information. + +- Concat + - See the ONNX [Concat documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Concat) for more information. + +- Constant + - See the ONNX [Constant documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Constant) for more information. + +- Clip + - See the ONNX [Clip documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Clip) for more information. + +- Flatten + - See the ONNX [Flatten documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Flatten) for more information. + +- Gather + - See the ONNX [Gather documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gather) for more information. + +- GlobalAveragePool + - See the ONNX [GlobalAveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#GlobalAveragePool) for more information. + +- LeakyRelu + - See the ONNX [LeakyRelu documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#LeakyRelu) for more information. + +- MaxPool + - See the ONNX [max_pool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#MaxPool) for more information. + +- Relu + - See the ONNX [Relu documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Relu) for more information. + +- Reshape + - See the ONNX [Reshape documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape) for more information. + +- Shape + - See the ONNX [Shape documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Shape) for more information. + +- Sigmoid + - See the ONNX [Sigmoid documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Sigmoid) for more information. + +- Tanh + - See the ONNX [Tanh documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Tanh) for more information. + +- Unsqueeze + - See the ONNX [Unsqueeze documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze) for more information. + +### Partially supported + +- Conv + - The parser only supports 2D convolutions with a group = 1 or group = #Nb_of_channel (depthwise convolution) +- BatchNormalization + - The parser does not support training mode. See the ONNX [BatchNormalization documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#BatchNormalization) for more information. +- Gemm + - The parser only supports constant bias or non-constant bias where bias dimension = 1. See the ONNX [Gemm documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm) for more information. +- MatMul + - The parser only supports constant weights in a fully connected layer. See the ONNX [MatMul documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#MatMul) for more information. 
+ +## Tested networks + +Arm tested these operators with the following ONNX fp32 neural networks: +- Mobilenet_v2. See the ONNX [MobileNet documentation](https://github.com/onnx/models/tree/master/vision/classification/mobilenet) for more information. +- Simple MNIST. This is no longer directly documented by ONNX. The model and test data may be downloaded [from the ONNX model zoo](https://onnxzoo.blob.core.windows.net/models/opset_8/mnist/mnist.tar.gz). + +More machine learning operators will be supported in future releases. +
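
To show how the ONNX parser is typically used, here is a minimal C++ sketch of parsing a model and preparing it for execution with the Arm NN runtime. It is an illustration rather than a definitive recipe: the file name `model.onnx` and the backend preference list are placeholders, and the calls shown should be checked against the armnnOnnxParser and Arm NN headers shipped with your release.

```cpp
// Minimal sketch: parse an ONNX model and load it into the Arm NN runtime.
// "model.onnx" and the backend preferences are placeholders.
#include <armnn/ArmNN.hpp>
#include <armnnOnnxParser/IOnnxParser.hpp>

int main()
{
    // Parse the ONNX protobuf file into an armnn::INetwork.
    armnnOnnxParser::IOnnxParserPtr parser = armnnOnnxParser::IOnnxParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.onnx");

    // Create a runtime and optimize the network for the preferred backends.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
                        runtime->GetDeviceSpec());

    // Load the optimized network; inference is then driven through
    // runtime->EnqueueWorkload() with input and output tensor bindings.
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}
```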
+ + + + +@section S6_tf_lite_parser Arm NN Tf Lite Parser + +`armnnTfLiteParser` is a library for loading neural networks defined by TensorFlow Lite FlatBuffers files +into the Arm NN runtime. + +## TensorFlow Lite operators that the Arm NN SDK supports + +This reference guide provides a list of TensorFlow Lite operators the Arm NN SDK currently supports. + +### Fully supported +The Arm NN SDK TensorFlow Lite parser currently supports the following operators: + +- ABS +- ADD +- ARG_MAX +- ARG_MIN +- AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- BATCH_TO_SPACE +- CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- CONV_3D, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- DEPTH_TO_SPACE +- DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- DEQUANTIZE +- DIV +- ELU +- EQUAL +- EXP +- EXPAND_DIMS +- FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- GATHER +- GREATER +- GREATER_EQUAL +- HARD_SWISH +- LEAKY_RELU +- LESS +- LESS_EQUAL +- LOGICAL_NOT +- LOGISTIC +- L2_NORMALIZATION +- MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE +- MAXIMUM +- MEAN +- MINIMUM +- MIRROR_PAD +- MUL +- NEG +- NOT_EQUAL +- PACK +- PAD +- PRELU +- QUANTIZE +- RELU +- RELU6 +- REDUCE_MAX +- REDUCE_MIN +- REDUCE_PROD +- RESHAPE +- RESIZE_BILINEAR +- RESIZE_NEAREST_NEIGHBOR +- RSQRT +- SHAPE +- SLICE +- SOFTMAX +- SPACE_TO_BATCH +- SPLIT +- SPLIT_V +- SQUEEZE +- STRIDED_SLICE +- SUB +- SUM +- TANH +- TRANSPOSE +- TRANSPOSE_CONV +- UNPACK + +### Custom Operator +- TFLite_Detection_PostProcess + +## Tested networks +Arm tested these operators with the following TensorFlow Lite neural network: +- [Quantized MobileNet](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz) +- [Quantized SSD MobileNet](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz) +- DeepSpeech v1 converted from [TensorFlow model](https://github.com/mozilla/DeepSpeech/releases/tag/v0.4.1) +- DeepSpeaker +- [DeepLab v3+](https://www.tensorflow.org/lite/models/segmentation/overview) +- FSRCNN +- EfficientNet-lite +- RDN converted from [TensorFlow model](https://github.com/hengchuan/RDN-TensorFlow) +- Quantized RDN (CpuRef) +- [Quantized Inception v3](http://download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz) +- [Quantized Inception v4](http://download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz) (CpuRef) +- Quantized ResNet v2 50 (CpuRef) +- Quantized Yolo v3 (CpuRef) + +More machine learning operators will be supported in future releases. + +**/ +} + diff --git a/docs/05_02_deserializer_serializer.dox b/docs/05_02_deserializer_serializer.dox new file mode 100644 index 0000000000..5d4dc43a74 --- /dev/null +++ b/docs/05_02_deserializer_serializer.dox @@ -0,0 +1,184 @@ +/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. +/// +/// SPDX-License-Identifier: MIT +/// + +namespace armnn +{ +/** +@page serializer Serializer +@tableofcontents + +The `armnnSerializer` is a library for serializing an Arm NN network to a stream. + +@section serializersupport Supported Layers + +This reference guide provides a list of layers which can be serialized by the Arm NN SDK. 
+ +@subsection serializersupportflully Fully supported + +The Arm NN SDK Serializer currently supports the following layers: + +- Activation +- Addition +- ArgMinMax +- BatchToSpaceNd +- BatchNormalization +- Cast +- ChannelShuffle +- Comparison +- Concat +- Constant +- Convolution2d +- Convolution3d +- DepthToSpace +- DepthwiseConvolution2d +- Dequantize +- DetectionPostProcess +- Division +- ElementwiseUnary +- Fill +- Floor +- FullyConnected +- Gather +- Input +- InstanceNormalization +- L2Normalization +- Logical +- LogSoftmax +- Lstm +- Maximum +- Mean +- Merge +- Minimum +- Multiplication +- Normalization +- Output +- Pad (Constant, Symmetric, Reflect) +- Permute +- Pooling2d +- Prelu +- QLstm +- Quantize +- QuantizedLstm +- Rank +- Reduce +- Reshape +- Resize +- Shape +- Slice +- Softmax +- SpaceToBatchNd +- SpaceToDepth +- Splitter +- Stack +- StandIn +- StridedSlice +- Subtraction +- Switch +- Transpose +- TransposeConvolution2d +- UnidirectionalSequenceLstm + +More machine learning layers will be supported in future releases. + +@subsection serializersupportdeprecated Deprecated layers + +Some layers have been deprecated and replaced by others layers. In order to maintain backward compatibility, serializations of these deprecated layers will deserialize to the layers that have replaced them, as follows: + +- Abs will deserialize as ElementwiseUnary +- Equal will deserialize as Comparison +- Greater will deserialize as Comparison +- Merger will deserialize as Concat +- ResizeBilinear will deserialize as Resize +- Rsqrt will deserialize as ElementwiseUnary +
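
To make the serializer's use concrete, the short sketch below writes an already-built network to a `.armnn` file. It assumes an existing `armnn::INetwork` (passed in here as `network`) and a placeholder output file name; check the exact `ISerializer` interface against the armnnSerializer header in your release.

```cpp
// Minimal sketch: serialize an existing Arm NN network to a binary .armnn file.
// `network` is assumed to be an already-built armnn::INetwork; the file name is a placeholder.
#include <armnn/INetwork.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <fstream>

void SaveNetwork(const armnn::INetwork& network)
{
    armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();

    // Capture the network in the serializer's internal representation.
    serializer->Serialize(network);

    // Write the serialized network out to a stream.
    std::ofstream file("model.armnn", std::ios::binary);
    serializer->SaveSerializedToStream(file);
}
```

The @ref deserializer described on the next page performs the reverse step and loads such a file back into the Arm NN runtime.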
+ +@page deserializer Deserializer +@tableofcontents + +The `armnnDeserializer` is a library for loading neural networks defined by Arm NN FlatBuffers files +into the Arm NN runtime. + +@section deserializersupport Supported Layers + +This reference guide provides a list of layers which can be deserialized by the Arm NN SDK. + +@subsection deserializersupportfully Fully supported + +The Arm NN SDK Deserialize parser currently supports the following layers: + +- Abs +- Activation +- Addition +- ArgMinMax +- BatchToSpaceNd +- BatchNormalization +- Cast +- ChannelShuffle +- Concat +- Comparison +- Constant +- Convolution2d +- DepthToSpace +- DepthwiseConvolution2d +- Dequantize +- DetectionPostProcess +- Division +- ElementwiseUnary +- Fill +- Floor +- FullyConnected +- Gather +- Input +- InstanceNormalization +- L2Normalization +- Logical +- LogSoftmax +- Lstm +- Maximum +- Mean +- Merge +- Minimum +- Multiplication +- Normalization +- Output +- Pad +- Permute +- Pooling2d +- Prelu +- Quantize +- QLstm +- QuantizedLstm +- Rank +- Reduce +- Reshape +- Resize +- ResizeBilinear +- Slice +- Softmax +- SpaceToBatchNd +- SpaceToDepth +- Splitter +- Stack +- StandIn +- StridedSlice +- Subtraction +- Switch +- Transpose +- TransposeConvolution2d +- UnidirectionalSequenceLstm + +More machine learning layers will be supported in future releases. + +@subsection deserializersupportdeprecated Deprecated layers + +Some layers have been deprecated and replaced by others layers. In order to maintain backward compatibility, serializations of these deprecated layers will deserialize to the layers that have replaced them, as follows: + +- Equal will deserialize as Comparison +- Merger will deserialize as Concat +- Greater will deserialize as Comparison +- ResizeBilinear will deserialize as Resize + +**/ +} \ No newline at end of file diff --git a/docs/05_03_delegate.dox b/docs/05_03_delegate.dox new file mode 100644 index 0000000000..b3caf8cbf8 --- /dev/null +++ b/docs/05_03_delegate.dox @@ -0,0 +1,178 @@ +/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. +/// +/// SPDX-License-Identifier: MIT +/// + +namespace armnn +{ +/** +@page delegate TfLite Delegate +@tableofcontents + + +@section delegateintro About the delegate +'armnnDelegate' is a library for accelerating certain TensorFlow Lite (TfLite) operators on Arm hardware. It can be +integrated in TfLite using its delegation mechanism. TfLite will then delegate the execution of operators supported by +Arm NN to Arm NN. + +The main difference to our @ref S6_tf_lite_parser is the amount of operators you can run with it. If none of the active +backends support an operation in your model you won't be able to execute it with our parser. In contrast to that, TfLite +only delegates operations to the armnnDelegate if it does support them and otherwise executes them itself. In other +words, every TfLite model can be executed and every operation in your model that we can accelerate will be accelerated. +That is the reason why the armnnDelegate is our recommended way to accelerate TfLite models. + +If you need help building the armnnDelegate, please take a look at our [build guide](delegate/BuildGuideNative.md). +An example how to setup TfLite to integrate the armnnDelegate can be found in this +guide: [Integrate the delegate into python](delegate/IntegrateDelegateIntoPython.md) + + +@section delegatesupport Supported Operators +This reference guide provides a list of TensorFlow Lite operators the Arm NN SDK currently supports. 
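
Before the operator list, here is a minimal C++ sketch of the integration flow described above: the Arm NN delegate is created and handed to a TfLite interpreter, which then offloads every supported operator to Arm NN and runs the rest itself. The model path, backend preferences and include paths are illustrative assumptions; the build and integration guides linked above remain the authoritative references.

```cpp
// Minimal sketch: attach the Arm NN delegate to a TfLite interpreter.
// Model path and backend preferences are placeholders.
#include <armnn_delegate.hpp>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>
#include <vector>

int main()
{
    // Build a TfLite interpreter for the model in the usual way.
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);

    // Create the Arm NN delegate, preferring CpuAcc with a CpuRef fallback.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);

    // TfLite delegates the operators Arm NN supports and executes the remainder itself.
    interpreter->ModifyGraphWithDelegate(armnnDelegatePtr.get());

    interpreter->AllocateTensors();
    // ... fill input tensors, interpreter->Invoke(), read output tensors ...
    return 0;
}
```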
+ +@subsection delegatefullysupported Fully supported + +The Arm NN SDK TensorFlow Lite delegate currently supports the following operators: + +- ABS + +- ADD + +- ARGMAX + +- ARGMIN + +- AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- BATCH_TO_SPACE_ND + +- CAST + +- CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- CONV_3D, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- DEPTH_TO_SPACE + +- DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- DEQUANTIZE + +- DIV + +- EQUAL + +- ELU + +- EXP + +- FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- FLOOR + +- GATHER + +- GREATER + +- GREATER_OR_EQUAL + +- HARD_SWISH + +- LESS + +- LESS_OR_EQUAL + +- LOCAL_RESPONSE_NORMALIZATION + +- LOGICAL_AND + +- LOGICAL_NOT + +- LOGICAL_OR + +- LOGISTIC + +- LOG_SOFTMAX + +- LSTM + +- L2_NORMALIZATION + +- L2_POOL_2D + +- MAXIMUM + +- MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE + +- MEAN + +- MINIMUM + +- MIRROR_PAD + +- MUL + +- NEG + +- NOT_EQUAL + +- PACK + +- PAD + +- PRELU + +- QUANTIZE + +- RANK + +- REDUCE_MAX + +- REDUCE_MIN + +- RESHAPE + +- RESIZE_BILINEAR + +- RESIZE_NEAREST_NEIGHBOR + +- RELU + +- RELU6 + +- RSQRT + +- SHAPE + +- SOFTMAX + +- SPACE_TO_BATCH_ND + +- SPACE_TO_DEPTH + +- SPLIT + +- SPLIT_V + +- SQRT + +- STRIDED_SLICE + +- SUB + +- SUM + +- TANH + +- TRANSPOSE + +- TRANSPOSE_CONV + +- UNIDIRECTIONAL_SEQUENCE_LSTM + +- UNPACK + +More machine learning operators will be supported in future releases. +**/ +} \ No newline at end of file diff --git a/docs/05_contributor.dox b/docs/05_contributor.dox deleted file mode 100644 index 5cbb6c3b8c..0000000000 --- a/docs/05_contributor.dox +++ /dev/null @@ -1,39 +0,0 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. -/// -/// SPDX-License-Identifier: MIT -/// - -namespace armnn -{ -/** - -@page contribguides Contribution Guides -@tableofcontents - -This is a collection of guides that should help you contribute code to Arm NN. Before you get started, please -take a look into our /ref md_Contributor_Guide section. - - - @subpage md_src_backends_README \n - This guide explains how to add your own backend to Arm NN. This might be useful if you would like to accelerate neural - networks on hardware that Arm NN currently doesn't support. - - - - @subpage md_src_dynamic_README \n - Arm NN allows you to load a backend dynamically on runtime. To find out how that can be done take a look at this guide. - -**/ -} - - -/// Create pages for each tool so they appear nicely in the doxygen tree-view. Subpages are not listed there. -/// Also we can overwrite the page name this way. -namespace armnn -{ -/** - -@page md_src_backends_README Backend Developer Guide - -@page md_src_dynamic_README Dynamically loadable Backend - -**/ -} diff --git a/docs/06_00_software_tools.dox b/docs/06_00_software_tools.dox deleted file mode 100644 index e560f44882..0000000000 --- a/docs/06_00_software_tools.dox +++ /dev/null @@ -1,46 +0,0 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. -/// -/// SPDX-License-Identifier: MIT -/// - -namespace armnn -{ -/** -@page swtools Software Tools - -On this page you can find all software tools contained in the Arm NN repository. You will find links to how-to guides and -other helpful information in each section. 
- - - @subpage delegate - - @subpage parsers - - @subpage md_python_pyarmnn_README - - @subpage serializer - - @subpage deserializer - - @subpage md_src_armnnConverter_README - - @subpage md_tests_ImageCSVFileGenerator_README - - @subpage md_tests_ImageTensorGenerator_README - - @subpage md_tests_ModelAccuracyTool-Armnn_README -**/ -} - - -/// Create pages for each tool so they appear nicely in the doxygen tree-view. Subpages are not listed there. -/// -/// Note: The parser, serializer and deserializer pages are created in 01_parsers.dox or 02_deserializer_serializer.dox -namespace armnn -{ -/** - -@page md_python_pyarmnn_README PyArmNN - -@page md_src_armnnConverter_README Converter - -@page md_tests_ModelAccuracyTool-Armnn_README ModelAccuracyTool - -@page md_tests_ImageCSVFileGenerator_README ImageCSVFileGenerator - -@page md_tests_ImageTensorGenerator_README ImageTensorGenerator - -**/ -} - diff --git a/docs/06_01_parsers.dox b/docs/06_01_parsers.dox deleted file mode 100644 index e7124ced94..0000000000 --- a/docs/06_01_parsers.dox +++ /dev/null @@ -1,207 +0,0 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. -/// -/// SPDX-License-Identifier: MIT -/// - -namespace armnn -{ -/** -@page parsers Parsers - -@tableofcontents -Execute models from different machine learning platforms efficiently with our parsers. Simply choose a parser according -to the model you want to run e.g. If you've got a model in onnx format (.onnx) use our onnx-parser. - -If you would like to run a Tensorflow Lite (TfLite) model you probably also want to take a look at our @ref delegate. - -All parsers are written in C++ but it is also possible to use them in python. For more information on our python -bindings take a look into the @ref md_python_pyarmnn_README section. - -
- - - - -@section S5_onnx_parser Arm NN Onnx Parser - -`armnnOnnxParser` is a library for loading neural networks defined in ONNX protobuf files into the Arm NN runtime. - -## ONNX operators that the Arm NN SDK supports - -This reference guide provides a list of ONNX operators the Arm NN SDK currently supports. - -The Arm NN SDK ONNX parser currently only supports fp32 operators. - -### Fully supported - -- Add - - See the ONNX [Add documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Add) for more information - -- AveragePool - - See the ONNX [AveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#AveragePool) for more information. - -- Concat - - See the ONNX [Concat documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Concat) for more information. - -- Constant - - See the ONNX [Constant documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Constant) for more information. - -- Clip - - See the ONNX [Clip documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Clip) for more information. - -- Flatten - - See the ONNX [Flatten documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Flatten) for more information. - -- Gather - - See the ONNX [Gather documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gather) for more information. - -- GlobalAveragePool - - See the ONNX [GlobalAveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#GlobalAveragePool) for more information. - -- LeakyRelu - - See the ONNX [LeakyRelu documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#LeakyRelu) for more information. - -- MaxPool - - See the ONNX [max_pool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#MaxPool) for more information. - -- Relu - - See the ONNX [Relu documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Relu) for more information. - -- Reshape - - See the ONNX [Reshape documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape) for more information. - -- Shape - - See the ONNX [Shape documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Shape) for more information. - -- Sigmoid - - See the ONNX [Sigmoid documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Sigmoid) for more information. - -- Tanh - - See the ONNX [Tanh documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Tanh) for more information. - -- Unsqueeze - - See the ONNX [Unsqueeze documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze) for more information. - -### Partially supported - -- Conv - - The parser only supports 2D convolutions with a group = 1 or group = #Nb_of_channel (depthwise convolution) -- BatchNormalization - - The parser does not support training mode. See the ONNX [BatchNormalization documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#BatchNormalization) for more information. -- Gemm - - The parser only supports constant bias or non-constant bias where bias dimension = 1. See the ONNX [Gemm documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm) for more information. -- MatMul - - The parser only supports constant weights in a fully connected layer. See the ONNX [MatMul documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#MatMul) for more information. 
- -## Tested networks - -Arm tested these operators with the following ONNX fp32 neural networks: -- Mobilenet_v2. See the ONNX [MobileNet documentation](https://github.com/onnx/models/tree/master/vision/classification/mobilenet) for more information. -- Simple MNIST. This is no longer directly documented by ONNX. The model and test data may be downloaded [from the ONNX model zoo](https://onnxzoo.blob.core.windows.net/models/opset_8/mnist/mnist.tar.gz). - -More machine learning operators will be supported in future releases. -
- - - - -@section S6_tf_lite_parser Arm NN Tf Lite Parser - -`armnnTfLiteParser` is a library for loading neural networks defined by TensorFlow Lite FlatBuffers files -into the Arm NN runtime. - -## TensorFlow Lite operators that the Arm NN SDK supports - -This reference guide provides a list of TensorFlow Lite operators the Arm NN SDK currently supports. - -### Fully supported -The Arm NN SDK TensorFlow Lite parser currently supports the following operators: - -- ABS -- ADD -- ARG_MAX -- ARG_MIN -- AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- BATCH_TO_SPACE -- CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- CONV_3D, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- DEPTH_TO_SPACE -- DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- DEQUANTIZE -- DIV -- ELU -- EQUAL -- EXP -- EXPAND_DIMS -- FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- GATHER -- GREATER -- GREATER_EQUAL -- HARD_SWISH -- LEAKY_RELU -- LESS -- LESS_EQUAL -- LOGICAL_NOT -- LOGISTIC -- L2_NORMALIZATION -- MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE -- MAXIMUM -- MEAN -- MINIMUM -- MIRROR_PAD -- MUL -- NEG -- NOT_EQUAL -- PACK -- PAD -- PRELU -- QUANTIZE -- RELU -- RELU6 -- REDUCE_MAX -- REDUCE_MIN -- REDUCE_PROD -- RESHAPE -- RESIZE_BILINEAR -- RESIZE_NEAREST_NEIGHBOR -- RSQRT -- SHAPE -- SLICE -- SOFTMAX -- SPACE_TO_BATCH -- SPLIT -- SPLIT_V -- SQUEEZE -- STRIDED_SLICE -- SUB -- SUM -- TANH -- TRANSPOSE -- TRANSPOSE_CONV -- UNPACK - -### Custom Operator -- TFLite_Detection_PostProcess - -## Tested networks -Arm tested these operators with the following TensorFlow Lite neural network: -- [Quantized MobileNet](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz) -- [Quantized SSD MobileNet](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz) -- DeepSpeech v1 converted from [TensorFlow model](https://github.com/mozilla/DeepSpeech/releases/tag/v0.4.1) -- DeepSpeaker -- [DeepLab v3+](https://www.tensorflow.org/lite/models/segmentation/overview) -- FSRCNN -- EfficientNet-lite -- RDN converted from [TensorFlow model](https://github.com/hengchuan/RDN-TensorFlow) -- Quantized RDN (CpuRef) -- [Quantized Inception v3](http://download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz) -- [Quantized Inception v4](http://download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz) (CpuRef) -- Quantized ResNet v2 50 (CpuRef) -- Quantized Yolo v3 (CpuRef) - -More machine learning operators will be supported in future releases. - -**/ -} - diff --git a/docs/06_02_deserializer_serializer.dox b/docs/06_02_deserializer_serializer.dox deleted file mode 100644 index 5d4dc43a74..0000000000 --- a/docs/06_02_deserializer_serializer.dox +++ /dev/null @@ -1,184 +0,0 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. -/// -/// SPDX-License-Identifier: MIT -/// - -namespace armnn -{ -/** -@page serializer Serializer -@tableofcontents - -The `armnnSerializer` is a library for serializing an Arm NN network to a stream. - -@section serializersupport Supported Layers - -This reference guide provides a list of layers which can be serialized by the Arm NN SDK. 
- -@subsection serializersupportflully Fully supported - -The Arm NN SDK Serializer currently supports the following layers: - -- Activation -- Addition -- ArgMinMax -- BatchToSpaceNd -- BatchNormalization -- Cast -- ChannelShuffle -- Comparison -- Concat -- Constant -- Convolution2d -- Convolution3d -- DepthToSpace -- DepthwiseConvolution2d -- Dequantize -- DetectionPostProcess -- Division -- ElementwiseUnary -- Fill -- Floor -- FullyConnected -- Gather -- Input -- InstanceNormalization -- L2Normalization -- Logical -- LogSoftmax -- Lstm -- Maximum -- Mean -- Merge -- Minimum -- Multiplication -- Normalization -- Output -- Pad (Constant, Symmetric, Reflect) -- Permute -- Pooling2d -- Prelu -- QLstm -- Quantize -- QuantizedLstm -- Rank -- Reduce -- Reshape -- Resize -- Shape -- Slice -- Softmax -- SpaceToBatchNd -- SpaceToDepth -- Splitter -- Stack -- StandIn -- StridedSlice -- Subtraction -- Switch -- Transpose -- TransposeConvolution2d -- UnidirectionalSequenceLstm - -More machine learning layers will be supported in future releases. - -@subsection serializersupportdeprecated Deprecated layers - -Some layers have been deprecated and replaced by others layers. In order to maintain backward compatibility, serializations of these deprecated layers will deserialize to the layers that have replaced them, as follows: - -- Abs will deserialize as ElementwiseUnary -- Equal will deserialize as Comparison -- Greater will deserialize as Comparison -- Merger will deserialize as Concat -- ResizeBilinear will deserialize as Resize -- Rsqrt will deserialize as ElementwiseUnary -
- -@page deserializer Deserializer -@tableofcontents - -The `armnnDeserializer` is a library for loading neural networks defined by Arm NN FlatBuffers files -into the Arm NN runtime. - -@section deserializersupport Supported Layers - -This reference guide provides a list of layers which can be deserialized by the Arm NN SDK. - -@subsection deserializersupportfully Fully supported - -The Arm NN SDK Deserialize parser currently supports the following layers: - -- Abs -- Activation -- Addition -- ArgMinMax -- BatchToSpaceNd -- BatchNormalization -- Cast -- ChannelShuffle -- Concat -- Comparison -- Constant -- Convolution2d -- DepthToSpace -- DepthwiseConvolution2d -- Dequantize -- DetectionPostProcess -- Division -- ElementwiseUnary -- Fill -- Floor -- FullyConnected -- Gather -- Input -- InstanceNormalization -- L2Normalization -- Logical -- LogSoftmax -- Lstm -- Maximum -- Mean -- Merge -- Minimum -- Multiplication -- Normalization -- Output -- Pad -- Permute -- Pooling2d -- Prelu -- Quantize -- QLstm -- QuantizedLstm -- Rank -- Reduce -- Reshape -- Resize -- ResizeBilinear -- Slice -- Softmax -- SpaceToBatchNd -- SpaceToDepth -- Splitter -- Stack -- StandIn -- StridedSlice -- Subtraction -- Switch -- Transpose -- TransposeConvolution2d -- UnidirectionalSequenceLstm - -More machine learning layers will be supported in future releases. - -@subsection deserializersupportdeprecated Deprecated layers - -Some layers have been deprecated and replaced by others layers. In order to maintain backward compatibility, serializations of these deprecated layers will deserialize to the layers that have replaced them, as follows: - -- Equal will deserialize as Comparison -- Merger will deserialize as Concat -- Greater will deserialize as Comparison -- ResizeBilinear will deserialize as Resize - -**/ -} \ No newline at end of file diff --git a/docs/06_03_delegate.dox b/docs/06_03_delegate.dox deleted file mode 100644 index b3caf8cbf8..0000000000 --- a/docs/06_03_delegate.dox +++ /dev/null @@ -1,178 +0,0 @@ -/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved. -/// -/// SPDX-License-Identifier: MIT -/// - -namespace armnn -{ -/** -@page delegate TfLite Delegate -@tableofcontents - - -@section delegateintro About the delegate -'armnnDelegate' is a library for accelerating certain TensorFlow Lite (TfLite) operators on Arm hardware. It can be -integrated in TfLite using its delegation mechanism. TfLite will then delegate the execution of operators supported by -Arm NN to Arm NN. - -The main difference to our @ref S6_tf_lite_parser is the amount of operators you can run with it. If none of the active -backends support an operation in your model you won't be able to execute it with our parser. In contrast to that, TfLite -only delegates operations to the armnnDelegate if it does support them and otherwise executes them itself. In other -words, every TfLite model can be executed and every operation in your model that we can accelerate will be accelerated. -That is the reason why the armnnDelegate is our recommended way to accelerate TfLite models. - -If you need help building the armnnDelegate, please take a look at our [build guide](delegate/BuildGuideNative.md). -An example how to setup TfLite to integrate the armnnDelegate can be found in this -guide: [Integrate the delegate into python](delegate/IntegrateDelegateIntoPython.md) - - -@section delegatesupport Supported Operators -This reference guide provides a list of TensorFlow Lite operators the Arm NN SDK currently supports. 
- -@subsection delegatefullysupported Fully supported - -The Arm NN SDK TensorFlow Lite delegate currently supports the following operators: - -- ABS - -- ADD - -- ARGMAX - -- ARGMIN - -- AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- BATCH_TO_SPACE_ND - -- CAST - -- CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- CONV_3D, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- DEPTH_TO_SPACE - -- DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- DEQUANTIZE - -- DIV - -- EQUAL - -- ELU - -- EXP - -- FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- FLOOR - -- GATHER - -- GREATER - -- GREATER_OR_EQUAL - -- HARD_SWISH - -- LESS - -- LESS_OR_EQUAL - -- LOCAL_RESPONSE_NORMALIZATION - -- LOGICAL_AND - -- LOGICAL_NOT - -- LOGICAL_OR - -- LOGISTIC - -- LOG_SOFTMAX - -- LSTM - -- L2_NORMALIZATION - -- L2_POOL_2D - -- MAXIMUM - -- MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE - -- MEAN - -- MINIMUM - -- MIRROR_PAD - -- MUL - -- NEG - -- NOT_EQUAL - -- PACK - -- PAD - -- PRELU - -- QUANTIZE - -- RANK - -- REDUCE_MAX - -- REDUCE_MIN - -- RESHAPE - -- RESIZE_BILINEAR - -- RESIZE_NEAREST_NEIGHBOR - -- RELU - -- RELU6 - -- RSQRT - -- SHAPE - -- SOFTMAX - -- SPACE_TO_BATCH_ND - -- SPACE_TO_DEPTH - -- SPLIT - -- SPLIT_V - -- SQRT - -- STRIDED_SLICE - -- SUB - -- SUM - -- TANH - -- TRANSPOSE - -- TRANSPOSE_CONV - -- UNIDIRECTIONAL_SEQUENCE_LSTM - -- UNPACK - -More machine learning operators will be supported in future releases. -**/ -} \ No newline at end of file diff --git a/docs/Doxyfile b/docs/Doxyfile index 1ddb3e3f08..43fd269621 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -817,12 +817,11 @@ INPUT = ./docs/01_00_quick_start.dox \ ./docs/01_01_delegate_start_guide.dox \ ./docs/02_operator_list.dox \ ./docs/03_build_guides.dox \ - ./docs/04_use_guides.dox \ - ./docs/05_contributor.dox \ - ./docs/06_00_software_tools.dox \ - ./docs/06_01_parsers.dox \ - ./docs/06_02_deserializer_serializer.dox \ - ./docs/06_03_delegate.dox \ + ./docs/04_contributor.dox \ + ./docs/05_00_software_components.dox \ + ./docs/05_01_parsers.dox \ + ./docs/05_02_deserializer_serializer.dox \ + ./docs/05_03_delegate.dox \ ./docs/FAQ.md \ ./tests/ImageCSVFileGenerator/README.md \ ./tests/ImageTensorGenerator/README.md \ -- cgit v1.2.1