From 6124ce60b54eb5639ed19d46c79fce21cca2c83b Mon Sep 17 00:00:00 2001 From: Sheri Zhang Date: Tue, 4 May 2021 14:03:13 +0100 Subject: Update operator list part3 Partially resolve: COMPMID-4199 Signed-off-by: Sheri Zhang Change-Id: Id24702d258fb4e04ad948e7cf6c0efd98d2a5456 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5561 Reviewed-by: TeresaARM Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins --- arm_compute/runtime/CL/functions/CLComparison.h | 10 +- arm_compute/runtime/CL/functions/CLCrop.h | 8 ++ .../CL/functions/CLDeconvolutionLayerUpsample.h | 9 ++ .../CL/functions/CLDirectDeconvolutionLayer.h | 14 +++ .../runtime/CL/functions/CLElementWiseUnaryLayer.h | 63 +++++++++++ .../runtime/CL/functions/CLElementwiseOperations.h | 124 ++++++++++++++++----- .../CL/functions/CLGEMMDeconvolutionLayer.h | 13 ++- .../runtime/CL/functions/CLGEMMLowpOutputStage.h | 12 +- arm_compute/runtime/CL/functions/CLLogicalAnd.h | 8 ++ arm_compute/runtime/CL/functions/CLLogicalNot.h | 8 ++ arm_compute/runtime/CL/functions/CLLogicalOr.h | 8 ++ arm_compute/runtime/CL/functions/CLSoftmaxLayer.h | 11 ++ .../CL/functions/CLWinogradInputTransform.h | 12 +- 13 files changed, 270 insertions(+), 30 deletions(-) (limited to 'arm_compute/runtime/CL/functions') diff --git a/arm_compute/runtime/CL/functions/CLComparison.h b/arm_compute/runtime/CL/functions/CLComparison.h index 8cc3e96ec5..3f984900ee 100644 --- a/arm_compute/runtime/CL/functions/CLComparison.h +++ b/arm_compute/runtime/CL/functions/CLComparison.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -39,6 +39,14 @@ class CLComparison : public ICLSimpleFunction { public: /** Initialise the kernel's inputs and outputs. + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------|:--------|:--------| + * |All |All |U8 | * * @param[in] input1 Source tensor. Data types supported: All. * The input1 tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. diff --git a/arm_compute/runtime/CL/functions/CLCrop.h b/arm_compute/runtime/CL/functions/CLCrop.h index dc509b5b84..d2b72a5eff 100644 --- a/arm_compute/runtime/CL/functions/CLCrop.h +++ b/arm_compute/runtime/CL/functions/CLCrop.h @@ -55,6 +55,14 @@ public: * * @note Supported tensor rank: up to 4 * + * Valid data layouts: + * - NHWC + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |All |F32 | + * * @param[in] input Source tensor. Data type supported: All. Data layouts supported: NHWC. * @param[out] output Destination tensor. Data type supported: F32 * @param[in] start Coordinates of where to start cropping the image. diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h index 6c1302fbf7..344ebd0afb 100644 --- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h +++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h @@ -60,6 +60,15 @@ public: ~CLDeconvolutionLayerUpsample(); /** Initialize the function's source, destination, interpolation type and border_mode. + * + * Valid data layouts: + * - NHWC + * - NCHW + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |All |All | * * @param[in, out] input Source tensor. Data type supported: All. * @param[out] output Destination tensor. 
Data type supported: same as @p input. diff --git a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h index a23500e16b..567de13508 100644 --- a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h +++ b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h @@ -86,6 +86,20 @@ public: /** Default move assignment operator */ CLDirectDeconvolutionLayer &operator=(CLDirectDeconvolutionLayer &&) = default; /** Set the input, weights, biases and output tensors. + * + * Valid data layouts: + * - NHWC + * - NCHW + * + * Valid data type configurations: + * |src0 |src1 |src2 |dst | + * |:--------------|:------------------|:------|:--------------| + * |F16 |F16 |F16 |F16 | + * |F32 |F32 |F32 |F32 | + * |QASYMM8 |QASYMM8 |S32 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED | + * |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 | + * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED | * * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32. diff --git a/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h b/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h index fd6942cad5..79b79e89de 100644 --- a/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h +++ b/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h @@ -53,6 +53,15 @@ public: /** Default move assignment operator */ CLRsqrtLayer &operator=(CLRsqrtLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. @@ -99,6 +108,15 @@ public: /** Default move assignment operator */ CLExpLayer &operator=(CLExpLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. @@ -145,6 +163,15 @@ public: /** Default move assignment operator */ CLNegLayer &operator=(CLNegLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. @@ -191,6 +218,15 @@ public: /** Default move assignment operator */ CLSinLayer &operator=(CLSinLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. 
@@ -237,6 +273,15 @@ public: /** Default move assignment operator */ CLLogLayer &operator=(CLLogLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. @@ -283,6 +328,15 @@ public: /** Default move assignment operator */ CLAbsLayer &operator=(CLAbsLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. @@ -329,6 +383,15 @@ public: /** Default move assignment operator */ CLRoundLayer &operator=(CLRoundLayer &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Input tensor. Data types supported: F16/F32. * @param[out] output Output tensor. Data types supported: same as @p input. diff --git a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h index 2b291517f3..555e84a251 100644 --- a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h +++ b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h @@ -55,19 +55,23 @@ public: CLArithmeticAddition &operator=(CLArithmeticAddition &&); /** Initialise the kernel's inputs, output and conversion policy. * - * Valid configurations (Input1,Input2) -> Output : - * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (S32,S32) -> S32 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 - * - (QASYMM8,QASYMM8) -> QASYMM8 - * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED - * - (QSYMM16,QSYMM16) -> QSYMM16 + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |QASYMM8 |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED | + * |QSYMM16 |QSYMM16 |QASYMM16 | + * |U8 |U8 |U8 | + * |U8 |U8 |S16 | + * |U8 |S16 |S16 | + * |S16 |U8 |S16 | + * |S16 |S16 |S16 | + * |S32 |S32 |S32 | + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -161,19 +165,23 @@ public: CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&); /** Initialise the kernel's inputs, output and conversion policy. 
* - * Valid configurations (Input1,Input2) -> Output : - * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (S32,S32) -> S32 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 - * - (QASYMM8,QASYMM8) -> QASYMM8 - * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED - * - (QSYMM16,QSYMM16) -> QSYMM16 + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |QASYMM8 |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED | + * |QSYMM16 |QSYMM16 |QASYMM16 | + * |U8 |U8 |U8 | + * |U8 |U8 |S16 | + * |U8 |S16 |S16 | + * |S16 |U8 |S16 | + * |S16 |S16 |S16 | + * |S32 |S32 |S32 | + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -266,6 +274,15 @@ public: /** Default move assignment operator */ CLArithmeticDivision &operator=(CLArithmeticDivision &&); /** Initialise the kernel's inputs, output. + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -326,6 +343,22 @@ public: /** Default move assignment operator */ CLElementwiseMax &operator=(CLElementwiseMax &&); /** Initialise the kernel's inputs, output and conversion policy. + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |QASYMM8 |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED | + * |QSYMM16 |QSYMM16 |QASYMM16 | + * |U8 |U8 |U8 | + * |S16 |S16 |S16 | + * |S32 |S32 |S32 | + * |U32 |U32 |U32 | + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -386,6 +419,22 @@ public: /** Default move assignment operator */ CLElementwiseMin &operator=(CLElementwiseMin &&); /** Initialise the kernel's inputs, output and conversion policy. + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |QASYMM8 |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED | + * |QSYMM16 |QSYMM16 |QASYMM16 | + * |U8 |U8 |U8 | + * |S16 |S16 |S16 | + * |S32 |S32 |S32 | + * |U32 |U32 |U32 | + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -446,6 +495,20 @@ public: /** Default move assignment operator */ CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&); /** Initialise the kernel's inputs, output and conversion policy. 
+ * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |QASYMM8 |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED | + * |QSYMM16 |QSYMM16 |QASYMM16 | + * |U8 |U8 |U8 | + * |S16 |S16 |S16 | + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. @@ -506,6 +569,15 @@ public: /** Default move assignment operator */ CLElementwisePower &operator=(CLElementwisePower &&); /** Initialise the kernel's inputs, output and conversion policy. + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:--------------|:--------------| + * |F16 |F16 |F16 | + * |F32 |F32 |F32 | * * @param[in, out] input1 First tensor input. Data types supported: F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. diff --git a/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h index 32af0f9427..6e482c98e7 100644 --- a/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h +++ b/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -92,6 +92,17 @@ public: /** Default desctructor */ ~CLGEMMDeconvolutionLayer(); /** Set the input, weights, biases and output tensors. + * + * Valid data layouts: + * - NHWC + * + * Valid data type configurations: + * |src0 |src1 |src2 |dst | + * |:--------------|:------------------|:--------|:--------------| + * |F16 |F16 |F16 |F16 | + * |F32 |F32 |F32 |F32 | + * |QASYMM8 |QASYMM8 |S32 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED | * * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. Data layout supported: NHWC diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h index 0f051ecffd..a60992a0f4 100644 --- a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h +++ b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h @@ -287,11 +287,21 @@ class CLGEMMLowpOutputStage : public ICLSimpleFunction { public: /** Initialise the kernel's inputs, output + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:-------------|:-------------| + * |S32 |S32 |QASYMM8 | + * |S32 |S32 |QASYMM8_SIGNED| + * |S32 |S32 |QSYMM16 | * * @param[in] input Input tensor. Data type supported: S32 * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. - * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED + * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM16 * @param[in] info GEMMLowp output stage metadata. 
*/ void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info); diff --git a/arm_compute/runtime/CL/functions/CLLogicalAnd.h b/arm_compute/runtime/CL/functions/CLLogicalAnd.h index f7038ee97a..61a15816eb 100644 --- a/arm_compute/runtime/CL/functions/CLLogicalAnd.h +++ b/arm_compute/runtime/CL/functions/CLLogicalAnd.h @@ -86,6 +86,14 @@ public: /** Default move assignment operator */ CLLogicalAnd &operator=(CLLogicalAnd &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:-------------|:------------| + * |U8 |U8 |U8 | * * @param[in] input1 Input tensor. Data types supported: U8. * @param[in] input2 Input tensor. Data types supported: same as @p input1. diff --git a/arm_compute/runtime/CL/functions/CLLogicalNot.h b/arm_compute/runtime/CL/functions/CLLogicalNot.h index 772f16b942..27fd0f9c9f 100644 --- a/arm_compute/runtime/CL/functions/CLLogicalNot.h +++ b/arm_compute/runtime/CL/functions/CLLogicalNot.h @@ -57,6 +57,14 @@ public: /** Default move assignment operator */ CLLogicalNot &operator=(CLLogicalNot &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:------------| + * |U8 |U8 | * * @param[in] input Input tensor. Data types supported: U8. * @param[out] output Output tensor. Data types supported: same as @p input. diff --git a/arm_compute/runtime/CL/functions/CLLogicalOr.h b/arm_compute/runtime/CL/functions/CLLogicalOr.h index 948baee9d9..b9ffb4a449 100644 --- a/arm_compute/runtime/CL/functions/CLLogicalOr.h +++ b/arm_compute/runtime/CL/functions/CLLogicalOr.h @@ -86,6 +86,14 @@ public: /** Default move assignment operator */ CLLogicalOr &operator=(CLLogicalOr &&); /** Initialize the function + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src0 |src1 |dst | + * |:--------------|:-------------|:------------| + * |U8 |U8 |U8 | * * @param[in] input1 Input tensor. Data types supported: U8. * @param[in] input2 Input tensor. Data types supported: same as @p input1. diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h index ddb35ae56f..721a47144e 100644 --- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h +++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h @@ -59,6 +59,17 @@ public: /** Default destructor */ ~CLSoftmaxLayerGeneric(); /** Set the input and output tensors. + * + * Valid data layouts: + * - All + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED | + * |F16 |F16 | + * |F32 |F32 | * * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax * @param[out] output Destination tensor. Data types supported: same as @p input diff --git a/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h b/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h index 8cd809cc1f..d644591b57 100644 --- a/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h +++ b/arm_compute/runtime/CL/functions/CLWinogradInputTransform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -40,6 +40,16 @@ class CLWinogradInputTransform : public ICLSimpleFunction { public: /** Set the input and output tensors. + * + * Valid data layouts: + * - NHWC + * - NCHW + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | * * @note Winograd input transform supports the following configurations for NCWH data layout * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3), -- cgit v1.2.1
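For reference, a minimal usage sketch of CLArithmeticAddition against one of the F32 rows documented in the new table. It is not part of the patch; the 16x16 shape and the saturating convert policy are illustrative assumptions, and the optional validate() call shows how a caller can check a (src0, src1, dst) combination against the supported list before configuring.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

using namespace arm_compute;

int main()
{
    // Create a default CL context and queue for the runtime functions.
    CLScheduler::get().default_init();

    // Placeholder 16x16 F32 tensors; any (src0, src1, dst) row from the table could be used instead.
    const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);

    CLTensor src0, src1, dst;
    src0.allocator()->init(info);
    src1.allocator()->init(info);
    dst.allocator()->init(info);

    // Optionally check the configuration against the supported data type list first.
    const Status status = CLArithmeticAddition::validate(src0.info(), src1.info(), dst.info(), ConvertPolicy::SATURATE);
    if(!bool(status))
    {
        return 1; // Unsupported data type / shape combination.
    }

    CLArithmeticAddition add;
    add.configure(&src0, &src1, &dst, ConvertPolicy::SATURATE);

    // Allocate backing CL buffers after configuration.
    src0.allocator()->allocate();
    src1.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src0/src1 here (map/unmap) ...

    add.run();
    CLScheduler::get().sync();
    return 0;
}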
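Similarly, a sketch of the quantized QASYMM8 row added to the CLSoftmaxLayer table. The shapes and the input quantization parameters are placeholder assumptions; the output scale of 1/256 with zero offset reflects the usual convention for softmax outputs in [0, 1], and beta/axis are left at their documented defaults.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Placeholder quantization parameters; real values come from the model.
    const QuantizationInfo src_qinfo(0.05f, 10);
    // Softmax produces values in [0, 1]: scale 1/256, zero offset for QASYMM8.
    const QuantizationInfo dst_qinfo(1.f / 256.f, 0);

    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::QASYMM8, src_qinfo));
    dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::QASYMM8, dst_qinfo));

    CLSoftmaxLayer softmax;
    softmax.configure(&src, &dst, /* beta */ 1.0f, /* axis */ 0);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    softmax.run();
    CLScheduler::get().sync();
    return 0;
}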
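Finally, a sketch for the element-wise unary layers (CLRsqrtLayer shown here; CLExpLayer, CLNegLayer, CLSinLayer, CLLogLayer, CLAbsLayer and CLRoundLayer follow the same pattern), using the F16 row from their new tables. The 1D shape of 256 elements is an illustrative assumption; F32 is equally valid per the table.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Placeholder 1D tensor of 256 F16 elements; output type matches the input.
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(256U), 1, DataType::F16));
    dst.allocator()->init(TensorInfo(TensorShape(256U), 1, DataType::F16));

    CLRsqrtLayer rsqrt;
    rsqrt.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    rsqrt.run();
    CLScheduler::get().sync();
    return 0;
}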