From 9a5e5b7032788249055f40edd763420f6bdba8fe Mon Sep 17 00:00:00 2001 From: Ryan OShea Date: Fri, 28 Jul 2023 17:38:19 +0100 Subject: IVGCVSW-7703 Ensure PyArmNN has been updated with new features added in ArmNN * Adds BatchMatMul layer and descriptor to pyarmnn * Adds ReverseV2 layer to pyarmnn * Adds ElementWiseBinary layer and descriptor to pyarmnn * Adds Tile layer and descriptor to pyarmnn * Adds network test for each layer Signed-off-by: Ryan OShea Change-Id: I07116d9e7b1eb6b6f8a687d8ba7cfbd11c912d0d --- .../src/pyarmnn/swig/modules/armnn_descriptors.i | 136 +++++++++++++++++++++ .../src/pyarmnn/swig/modules/armnn_network.i | 55 +++++++++ python/pyarmnn/test/test_network.py | 4 + 3 files changed, 195 insertions(+) diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i index 9374945daf..755d3c50e9 100644 --- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i +++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i @@ -89,6 +89,104 @@ struct ArgMinMaxDescriptor bool operator ==(const ArgMinMaxDescriptor &rhs) const; }; +%feature("docstring", + " + A descriptor for the BatchMatMul layer. See `INetwork.AddBatchMatMulLayer()`. + + Contains: + m_TransposeX (bool): Transpose the slices of input tensor X. Transpose and Adjoint can not both be set to true for the same tensor at the same time. + m_TransposeY (bool): Transpose the slices of input tensor Y. Transpose and Adjoint can not both be set to true for the same tensor at the same time. + m_AdjointX (bool): Adjoint the slices of input tensor X. Transpose and Adjoint can not both be set to true for the same tensor at the same time. + m_AdjointY (bool): Adjoint the slices of input tensor Y. Transpose and Adjoint can not both be set to true for the same tensor at the same time. + m_DataLayoutX (DataLayout): Data layout of input tensor X, such as NHWC/NDHWC (leave as default for arbitrary layout). 
+ m_DataLayoutY (DataLayout): Data layout of input tensor Y, such as NHWC/NDHWC (leave as default for arbitrary layout) + ") BatchMatMulDescriptor; +struct BatchMatMulDescriptor +{ + BatchMatMulDescriptor(bool transposeX = false, + bool transposeY = false, + bool adjointX = false, + bool adjointY = false, + DataLayout dataLayoutX = DataLayout::NCHW, + DataLayout dataLayoutY = DataLayout::NCHW) + : m_TransposeX(transposeX) + , m_TransposeY(transposeY) + , m_AdjointX(adjointX) + , m_AdjointY(adjointY) + , m_DataLayoutX(dataLayoutX) + , m_DataLayoutY(dataLayoutY) + {} + + bool operator ==(const BatchMatMulDescriptor &rhs) const + { + return m_TransposeX == rhs.m_TransposeX && + m_TransposeY == rhs.m_TransposeY && + m_AdjointX == rhs.m_AdjointX && + m_AdjointY == rhs.m_AdjointY && + m_DataLayoutX == rhs.m_DataLayoutX && + m_DataLayoutY == rhs.m_DataLayoutY; + } + + bool m_TransposeX; + bool m_TransposeY; + bool m_AdjointX; + bool m_AdjointY; + DataLayout m_DataLayoutX; + DataLayout m_DataLayoutY; + + static std::pair, std::pair> GetAxesToMul( + const BatchMatMulDescriptor& desc, + const armnn::TensorShape& tensorXShape, + const armnn::TensorShape& tensorYShape); + + static std::pair, std::vector> GetAxesNotMul( + const BatchMatMulDescriptor& desc, + const armnn::TensorShape& inputXShape, + const armnn::TensorShape& inputYShape); + + %feature("docstring", + " + Static helper to get the two axes (for each input) for multiplication + Args: + dataLayout (DataLayout) + tensorShape (TensorShape) + + Returns: + std::pair + ") GetAxesToMul; + static std::pair GetAxesToMul( + DataLayout dataLayout, + const TensorShape& tensorShape); + + %feature("docstring", + " + Static helper to get the two axes (for each input) that will not be multiplied together + Args: + dataLayout (DataLayout) + tensorShape (TensorShape) + + Returns: + std::vector + ") GetAxesNotMul; + static std::vector GetAxesNotMul( + DataLayout dataLayout, + const TensorShape& tensorShape); + + 
%feature("docstring", + " + Static helper to get the axes which will be transposed + Args: + dataLayout (DataLayout) + tensorShape (TensorShape) + + Returns: + PermutationVector + ") GetPermuteVec; + static PermutationVector GetPermuteVec( + armnn::DataLayout dataLayout, + const armnn::TensorShape& tensorShape); +}; + %feature("docstring", " A descriptor for the BatchNormalization layer. See `INetwork.AddBatchNormalizationLayer()`. @@ -677,6 +775,26 @@ struct PadDescriptor bool operator ==(const PadDescriptor& rhs) const; }; +%feature("docstring", + " + A descriptor for the ElementwiseBinary layer. See `INetwork.AddElementwiseBinaryLayer()`. + Contains: + m_Operation (int): Indicates which Binary operation to use. (`BinaryOperation_Add`, `BinaryOperation_Div`, + `BinaryOperation_Maximum`, `BinaryOperation_Minimum`, `BinaryOperation_Mul`, `BinaryOperation_Sub`, + `BinaryOperation_SqDiff`, `BinaryOperation_Power`) + Default: `BinaryOperation_Add`. + + ") ElementwiseBinaryDescriptor; +struct ElementwiseBinaryDescriptor +{ + ElementwiseBinaryDescriptor(); + ElementwiseBinaryDescriptor(BinaryOperation operation); + + BinaryOperation m_Operation; + + bool operator ==(const ElementwiseBinaryDescriptor &rhs) const; +}; + %feature("docstring", " A descriptor for the ElementwiseUnary layer. See `INetwork.AddElementwiseUnaryLayer()`. @@ -1209,6 +1327,24 @@ struct LogicalBinaryDescriptor bool operator ==(const LogicalBinaryDescriptor &rhs) const; }; +%feature("docstring", + " + A descriptor for the Tile layer. See `INetwork.AddTileLayer()`. + + Contains: + m_Multiples (std::vector): The vector to multiply the input shape by + + ") TileDescriptor; +struct TileDescriptor +{ + TileDescriptor(); + TileDescriptor(const std::vector& multiples); + + std::vector m_Multiples; + + bool operator ==(const TileDescriptor &rhs) const; +}; + %feature("docstring", " A descriptor for the Transpose layer. See `INetwork.AddTransposeLayer()`. 
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i index c9eef8630d..0b7f55d1cc 100644 --- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i +++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i @@ -394,6 +394,21 @@ public: const char* name = nullptr); + %feature("docstring", + " + Adds a Batch Matrix Multiplication layer to the network. + + Args: + desc (BatchMatMulDescriptor): Parameters for the BatchMatMul layer. + name (str): Optional name for the layer. + + Returns: + IConnectableLayer: Interface for configuring the layer. + ") AddBatchMatMulLayer; + armnn::IConnectableLayer* AddBatchMatMulLayer(const armnn::BatchMatMulDescriptor& desc, + const char* name = nullptr); + + %feature("docstring", " Adds a Batch Normalization layer to the network. @@ -593,6 +608,20 @@ public: ") AddDivisionLayer; armnn::IConnectableLayer* AddDivisionLayer(const char* name = nullptr); + %feature("docstring", + " + Adds an Elementwise Binary layer to the network. Type of binary operation to use is decided by elementwiseBinaryDescriptor. Binary operations supported are (Add, Div, Maximum, Minimum, Mul, Sub, SqDiff, Power) + + Args: + elementwiseBinaryDescriptor (ElementwiseBinaryDescriptor): ElementwiseBinaryDescriptor to configure the choice of binary operation added to the network. + name (str): Optional name for the layer. + + Returns: + IConnectableLayer: Interface for configuring the layer. + ") AddElementwiseBinaryLayer; + armnn::IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor, + const char* name = nullptr); + %feature("docstring", " Adds an Elementwise Unary layer to the network. Type of unary operation to use is decided by elementwiseUnaryDescriptor. 
Unary operations supported are (Abs, Exp, Neg, Rsqrt, Sqrt) @@ -941,6 +970,18 @@ public: armnn::IConnectableLayer* AddResizeLayer(const armnn::ResizeDescriptor& resizeDescriptor, const char* name = nullptr); + %feature("docstring", + " + Adds a ReverseV2 layer to the network. + + Args: + name (str): Optional name for the layer. + + Returns: + IConnectableLayer: Interface for configuring the layer. + ") AddReverseV2Layer; + armnn::IConnectableLayer* AddReverseV2Layer(const char* name = nullptr); + %feature("docstring", " Adds a Shape layer to the network. @@ -1120,6 +1161,20 @@ public: armnn::IConnectableLayer* AddLogicalBinaryLayer(const armnn::LogicalBinaryDescriptor& logicalBinaryDescriptor, const char* name = nullptr); + %feature("docstring", + " + Adds a Tile layer to the network. + + Args: + tileDescriptor (TileDescriptor): Description of the tile layer. + name (str): Optional name for the layer. + + Returns: + IConnectableLayer: Interface for configuring the layer. + ") AddTileLayer; + armnn::IConnectableLayer* AddTileLayer(const armnn::TileDescriptor& tileDescriptor, + const char* name = nullptr); + %feature("docstring", " Adds a Transpose layer to the network. 
diff --git a/python/pyarmnn/test/test_network.py b/python/pyarmnn/test/test_network.py index 88be5a8e7f..2fe8938978 100644 --- a/python/pyarmnn/test/test_network.py +++ b/python/pyarmnn/test/test_network.py @@ -190,6 +190,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir): 'AddActivationLayer', 'AddAdditionLayer', 'AddArgMinMaxLayer', + 'AddBatchMatMulLayer', 'AddBatchNormalizationLayer', 'AddBatchToSpaceNdLayer', 'AddCastLayer', @@ -204,6 +205,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir): 'AddDequantizeLayer', 'AddDetectionPostProcessLayer', 'AddDivisionLayer', + 'AddElementwiseBinaryLayer', 'AddElementwiseUnaryLayer', 'AddFloorLayer', 'AddFillLayer', @@ -234,6 +236,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir): 'AddReduceLayer', 'AddReshapeLayer', 'AddResizeLayer', + 'AddReverseV2Layer', 'AddShapeLayer', 'AddSliceLayer', 'AddSoftmaxLayer', @@ -245,6 +248,7 @@ def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir): 'AddStridedSliceLayer', 'AddSubtractionLayer', 'AddSwitchLayer', + 'AddTileLayer', 'AddTransposeConvolution2dLayer', 'AddTransposeLayer' ]) -- cgit v1.2.1